/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */
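
/*
 * A sketch of the tryenter rule above (hypothetical caller, not a
 * function in this file): code that walks an arc list and needs the
 * corresponding hash lock must back off rather than block:
 *
 *	mutex_enter(&state->mtx);		(arc list lock)
 *	for (ab = list_tail(&state->list); ab; ab = list_prev(...)) {
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;		(skip; never mutex_enter())
 *		... operate on ab ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->mtx);
 *
 * This mirrors what arc_evict() and arc_evict_ghost() do below.
 */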

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static kmutex_t arc_reclaim_lock;
static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to the buffer, they
 * are linked onto one of the lists in arc.  These are the
 * only buffers that can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 */

typedef struct arc_state {
	list_t	list;	/* linked list of evictable buffers in state */
	uint64_t lsize;	/* total size of buffers in the linked list */
	uint64_t size;	/* total size of all buffers in this state */
	uint64_t hits;
	kmutex_t mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

static struct arc {
	arc_state_t	*anon;
	arc_state_t	*mru;
	arc_state_t	*mru_ghost;
	arc_state_t	*mfu;
	arc_state_t	*mfu_ghost;
	uint64_t	size;		/* Actual total arc size */
	uint64_t	p;		/* Target size (in bytes) of mru */
	uint64_t	c;		/* Target size of cache (in bytes) */
	uint64_t	c_min;		/* Minimum target cache size */
	uint64_t	c_max;		/* Maximum target cache size */

	/* performance stats */
	uint64_t	hits;
	uint64_t	misses;
	uint64_t	deleted;
	uint64_t	recycle_miss;
	uint64_t	mutex_miss;
	uint64_t	evict_skip;
	uint64_t	hash_elements;
	uint64_t	hash_elements_max;
	uint64_t	hash_collisions;
	uint64_t	hash_chains;
	uint32_t	hash_chain_max;

	int		no_grow;	/* Don't try to grow cache size */
} arc;

static uint64_t arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	void			*acb_private;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

struct arc_buf_hdr {
	/* immutable */
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	kcondvar_t		b_cv;
	arc_callback_t		*b_acb;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

#define	GHOST_STATE(state)	\
	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
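
/*
 * A minimal usage sketch for buf_hash_find() (illustrative only; the
 * real callers are arc_read() and arc_read_done() below).  The mutex
 * handed back via lockp is already held and protects the header until
 * the caller drops it:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth,
 *	    &hash_lock);
 *	if (hdr != NULL) {
 *		... examine or update hdr fields ...
 *		mutex_exit(hash_lock);
 *	}
 */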

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t max, i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		atomic_add_64(&arc.hash_collisions, 1);
		if (i == 1)
			atomic_add_64(&arc.hash_chains, 1);
	}
	while (i > (max = arc.hash_chain_max) &&
	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
		continue;
	}
	atomic_add_64(&arc.hash_elements, 1);
	if (arc.hash_elements > arc.hash_elements_max)
		atomic_add_64(&arc.hash_elements_max, 1);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	atomic_add_64(&arc.hash_elements, -1);
	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		atomic_add_64(&arc.hash_chains, -1);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

static int arc_reclaim_needed(void);
void arc_kmem_reclaim(void);

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	if (arc_reclaim_needed())
		arc_kmem_reclaim();
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}
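
/*
 * To make the sizing above concrete: on a machine with 4GB of physical
 * memory (physmem * PAGESIZE == 2^32), the loop in buf_init() grows
 * hsize until hsize * 64K >= 2^32, i.e. hsize == 2^16 == 65536 buckets.
 * With 8-byte pointers the bucket array is then 65536 * 8 == 512KB,
 * matching the 128KB-per-GB estimate in the comment above.
 */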

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc.anon)) {
		int delta = ab->b_size * ab->b_datacnt;

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(&ab->b_state->list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(ab->b_state->lsize, >=, delta);
		atomic_add_64(&ab->b_state->lsize, -delta);
		mutex_exit(&ab->b_state->mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;

	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(ab->b_state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (ab->b_state != arc.anon)) {

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&ab->b_state->list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
		mutex_exit(&ab->b_state->mtx);
	}
	return (cnt);
}
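
/*
 * Taken together, the two functions above implement the "un-evictable
 * while referenced" rule from the big comment at the top of this file:
 * the first hold pulls a buffer off its state list (making it invisible
 * to eviction) and the last release puts it back, e.g.
 *
 *	add_reference(hdr, hash_lock, tag);		(now un-evictable)
 *	... read or modify the buffer ...
 *	(void) remove_reference(hdr, hash_lock, tag);	(evictable again)
 *
 * Anonymous buffers are never on a list, so both functions skip them.
 */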

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int refcnt = refcount_count(&ab->b_refcnt);
	int from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc.anon) {
			int use_mutex = !MUTEX_HELD(&old_state->mtx);

			if (use_mutex)
				mutex_enter(&old_state->mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(old_state->lsize, >=, from_delta);
			atomic_add_64(&old_state->lsize, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->mtx);
		}
		if (new_state != arc.anon) {
			int use_mutex = !MUTEX_HELD(&new_state->mtx);

			if (use_mutex)
				mutex_enter(&new_state->mtx);

			list_insert_head(&new_state->list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(&new_state->lsize, to_delta);
			ASSERT3U(new_state->size + to_delta, >=,
			    new_state->lsize);

			if (use_mutex)
				mutex_exit(&new_state->mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc.anon && old_state != arc.anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->size, >=, from_delta);
		atomic_add_64(&old_state->size, -from_delta);
	}
	ab->b_state = new_state;
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_spa = spa;
	hdr->b_state = arc.anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	atomic_add_64(&arc.hits, 1);
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;

		if (!recycle) {
			zio_buf_free(buf->b_data, size);
			atomic_add_64(&arc.size, -size);
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc.anon);
			ASSERT3U(state->lsize, >=, size);
			atomic_add_64(&state->lsize, -size);
		}
		ASSERT3U(state->size, >=, size);
		atomic_add_64(&state->size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc.anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc.anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc.anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc.anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 */
static void *
arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc.mru || state == arc.mfu);

	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;

	mutex_enter(&state->mtx);
	mutex_enter(&evicted_state->mtx);

	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_size == bytes) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags = ARC_IN_HASH_TABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}
	mutex_exit(&evicted_state->mtx);
	mutex_exit(&state->mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		atomic_add_64(&arc.evict_skip, skipped);
	if (missed)
		atomic_add_64(&arc.mutex_miss, missed);
	return (stolen);
}
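
/*
 * A note on the recycle flag: arc_get_data_buf() below uses it to turn
 * an eviction into an allocation, stealing the data block of an evicted
 * buffer of exactly the right size instead of freeing it and allocating
 * a new one:
 *
 *	if ((buf->b_data = arc_evict(state, size, TRUE)) == NULL)
 *		buf->b_data = zio_buf_alloc(size);
 *
 * (the NULL return means no same-size victim was found).
 */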

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->mtx);
	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			arc_change_state(arc.anon, ab, hash_lock);
			mutex_exit(hash_lock);
			atomic_add_64(&arc.deleted, 1);
			bytes_deleted += ab->b_size;
			arc_hdr_destroy(ab);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->mtx);

	if (bufs_skipped) {
		atomic_add_64(&arc.mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over;

	top_sz = arc.anon->size + arc.mru->size;

	if (top_sz > arc.p && arc.mru->lsize > 0) {
		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
		(void) arc_evict(arc.mru, toevict, FALSE);
		top_sz = arc.anon->size + arc.mru->size;
	}

	mru_over = top_sz + arc.mru_ghost->size - arc.c;

	if (mru_over > 0) {
		if (arc.mru_ghost->lsize > 0) {
			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
			arc_evict_ghost(arc.mru_ghost, todelete);
		}
	}

	if ((arc_over = arc.size - arc.c) > 0) {
		int64_t tbl_over;

		if (arc.mfu->lsize > 0) {
			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
			(void) arc_evict(arc.mfu, toevict, FALSE);
		}

		tbl_over = arc.size + arc.mru_ghost->lsize +
		    arc.mfu_ghost->lsize - arc.c*2;

		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
			arc_evict_ghost(arc.mfu_ghost, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(void)
{
	while (list_head(&arc.mru->list))
		(void) arc_evict(arc.mru, -1, FALSE);
	while (list_head(&arc.mfu->list))
		(void) arc_evict(arc.mfu, -1, FALSE);

	arc_evict_ghost(arc.mru_ghost, -1);
	arc_evict_ghost(arc.mfu_ghost, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(arc_eviction_list == NULL);
}

int arc_kmem_reclaim_shift = 5;		/* log2(fraction of arc to reclaim) */
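
/*
 * Worked example of the shift above: with a target cache size arc.c of
 * 1GB and arc_kmem_reclaim_shift == 5, each call to arc_kmem_reclaim()
 * frees arc.c >> 5, i.e. 1/32nd of the cache, or 32MB (more if the
 * kernel's needfree page count demands it).
 */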

void
arc_kmem_reclaim(void)
{
	uint64_t to_free;

	/*
	 * We need arc_reclaim_lock because we don't want multiple
	 * threads trying to reclaim concurrently.
	 */

	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().  So we set a flag to prevent
	 * accessing the destroyed mutexes and lists.
	 */
	if (arc_dead)
		return;

	if (arc.c <= arc.c_min)
		return;

	mutex_enter(&arc_reclaim_lock);

#ifdef _KERNEL
	to_free = MAX(arc.c >> arc_kmem_reclaim_shift, ptob(needfree));
#else
	to_free = arc.c >> arc_kmem_reclaim_shift;
#endif
	if (arc.c > to_free)
		atomic_add_64(&arc.c, -to_free);
	else
		arc.c = arc.c_min;

	atomic_add_64(&arc.p, -(arc.p >> arc_kmem_reclaim_shift));
	if (arc.c > arc.size)
		arc.c = arc.size;
	if (arc.c < arc.c_min)
		arc.c = arc.c_min;
	if (arc.p > arc.c)
		arc.p = (arc.c >> 1);
	ASSERT((int64_t)arc.p >= 0);

	arc_adjust();

	mutex_exit(&arc_reclaim_lock);
}

static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL

	if (needfree)
		return (1);

	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}

static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
	size_t			i;
	kmem_cache_t		*prev_cache = NULL;
	extern kmem_cache_t	*zio_buf_cache[];

#ifdef _KERNEL
	/*
	 * First purge some DNLC entries, in case the DNLC is using
	 * up too much memory.
	 */
	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);

#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_kmem_reclaim();

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}

static void
arc_reclaim_thread(void)
{
	clock_t			growtime = 0;
	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t		cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc.no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc.no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = lbolt + (arc_grow_retry * hz);
			ASSERT(growtime > 0);

			arc_kmem_reap_now(last_reclaim);

		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
			arc.no_grow = FALSE;
		}

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (lbolt + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}

/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
	if (state == arc.mru_ghost) {
		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));

		arc.p = MIN(arc.c, arc.p + bytes * mult);
	} else if (state == arc.mfu_ghost) {
		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));

		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
	}
	ASSERT((int64_t)arc.p >= 0);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thr_cv);
		return;
	}

	if (arc.no_grow)
		return;

	if (arc.c >= arc.c_max)
		return;

	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
		atomic_add_64(&arc.c, (int64_t)bytes);
		if (arc.c > arc.c_max)
			arc.c = arc.c_max;
		else if (state == arc.anon)
			atomic_add_64(&arc.p, (int64_t)bytes);
		if (arc.p > arc.c)
			arc.p = arc.c;
	}
	ASSERT((int64_t)arc.p >= 0);
}
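
/*
 * Worked example of the adaptation above (illustrative numbers): on a
 * hit in the MRU ghost list with mru_ghost->size == 100MB and
 * mfu_ghost->size == 200MB, mult == 200/100 == 2, so a 128K access
 * grows the MRU target p by 256K (capped at arc.c).  If mru_ghost were
 * the larger list, mult would be 1 and p would grow by only 128K.
 * Hits in the MFU ghost list shrink p symmetrically.
 */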

/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed()
{
	if (arc_reclaim_needed())
		return (1);

	return (arc.size > arc.c);
}

/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc.mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc.mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
static void
arc_get_data_buf(arc_buf_t *buf)
{
	arc_state_t *state = buf->b_hdr->b_state;
	uint64_t size = buf->b_hdr->b_size;

	arc_adapt(size, state);

	/*
	 * We have not yet reached cache maximum size,
	 * just allocate a new buffer.
	 */
	if (!arc_evict_needed()) {
		buf->b_data = zio_buf_alloc(size);
		atomic_add_64(&arc.size, size);
		goto out;
	}

	/*
	 * If we are prefetching from the mfu ghost list, this buffer
	 * will end up on the mru list; so steal space from there.
	 */
	if (state == arc.mfu_ghost)
		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu;
	else if (state == arc.mru_ghost)
		state = arc.mru;

	if (state == arc.mru || state == arc.anon) {
		uint64_t mru_used = arc.anon->size + arc.mru->size;
		state = (arc.p > mru_used) ? arc.mfu : arc.mru;
	} else {
		/* MFU cases */
		uint64_t mfu_space = arc.c - arc.p;
		state = (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu;
	}
	if ((buf->b_data = arc_evict(state, size, TRUE)) == NULL) {
		buf->b_data = zio_buf_alloc(size);
		atomic_add_64(&arc.size, size);
		atomic_add_64(&arc.recycle_miss, 1);
	}
	ASSERT(buf->b_data != NULL);
out:
	/*
	 * Update the state size.  Note that ghost states have a
	 * "ghost size" and so don't need to be updated.
	 */
	if (!GHOST_STATE(buf->b_hdr->b_state)) {
		arc_buf_hdr_t *hdr = buf->b_hdr;

		atomic_add_64(&hdr->b_state->size, size);
		if (list_link_active(&hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&hdr->b_refcnt));
			atomic_add_64(&hdr->b_state->lsize, size);
		}
	}
}

/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if (buf->b_state == arc.anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list.  Add the new buffer
		 * to the MRU state.
		 */

		ASSERT(buf->b_arc_access == 0);
		buf->b_arc_access = lbolt;
		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		arc_change_state(arc.mru, buf, hash_lock);

	} else if (buf->b_state == arc.mru) {
		/*
		 * If this buffer is here because of a prefetch, then either:
		 * - clear the flag if this is a "referencing" read
		 *   (any subsequent access will bump this into the MFU state).
		 * or
		 * - move the buffer to the head of the list if this is
		 *   another prefetch (to make it less likely to be evicted).
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			if (refcount_count(&buf->b_refcnt) == 0) {
				ASSERT(list_link_active(&buf->b_arc_node));
				mutex_enter(&arc.mru->mtx);
				list_remove(&arc.mru->list, buf);
				list_insert_head(&arc.mru->list, buf);
				mutex_exit(&arc.mru->mtx);
			} else {
				buf->b_flags &= ~ARC_PREFETCH;
				atomic_add_64(&arc.mru->hits, 1);
			}
			buf->b_arc_access = lbolt;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache.  Move it to the MFU
		 * state.
		 */
		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
			/*
			 * More than ARC_MINTIME has passed since we
			 * instantiated this buffer.  Move it to the
			 * most frequently used state.
			 */
			buf->b_arc_access = lbolt;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
			arc_change_state(arc.mfu, buf, hash_lock);
		}
		atomic_add_64(&arc.mru->hits, 1);
	} else if (buf->b_state == arc.mru_ghost) {
		arc_state_t	*new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			new_state = arc.mru;
			if (refcount_count(&buf->b_refcnt) > 0)
				buf->b_flags &= ~ARC_PREFETCH;
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		} else {
			new_state = arc.mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		}

		buf->b_arc_access = lbolt;
		arc_change_state(new_state, buf, hash_lock);

		atomic_add_64(&arc.mru_ghost->hits, 1);
	} else if (buf->b_state == arc.mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			ASSERT(refcount_count(&buf->b_refcnt) == 0);
			ASSERT(list_link_active(&buf->b_arc_node));
			mutex_enter(&arc.mfu->mtx);
			list_remove(&arc.mfu->list, buf);
			list_insert_head(&arc.mfu->list, buf);
			mutex_exit(&arc.mfu->mtx);
		}
		atomic_add_64(&arc.mfu->hits, 1);
		buf->b_arc_access = lbolt;
	} else if (buf->b_state == arc.mfu_ghost) {
		arc_state_t	*new_state = arc.mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.
			 */
			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
			new_state = arc.mru;
		}

		buf->b_arc_access = lbolt;
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(new_state, buf, hash_lock);

		atomic_add_64(&arc.mfu_ghost->hits, 1);
	} else {
		ASSERT(!"invalid arc state");
	}
}
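
/*
 * Summary of the transitions implemented above (prefetch cases move to
 * mru instead of mfu, as described in each branch):
 *
 *	anon      --access-->  mru
 *	mru       --access-->  mfu	(if older than ARC_MINTIME)
 *	mru_ghost --access-->  mfu
 *	mfu       --access-->  mfu	(stays; moved to head if prefetch)
 *	mfu_ghost --access-->  mfu
 */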

/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
}

/* a generic arc_done_func_t which you can use */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
		*bufp = NULL;
	} else {
		*bufp = buf;
	}
}

static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t	*hdr, *found;
	arc_buf_t	*buf;
	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
	kmutex_t	*hash_lock;
	arc_callback_t	*callback_list, *acb;
	int		freeable = FALSE;

	buf = zio->io_private;
	hdr = buf->b_hdr;

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O.  We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O.  The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
	    &hash_lock);

	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));

	/* byteswap if necessary */
	callback_list = hdr->b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
		callback_list->acb_byteswap(buf->b_data, hdr->b_size);

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL)
				abuf = arc_buf_clone(buf);
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_acb = NULL;
	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf)
		hdr->b_flags |= ARC_BUF_AVAILABLE;

	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_IO_ERROR;
		if (hdr->b_state != arc.anon)
			arc_change_state(arc.anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_refcnt);
		/* convert checksum errors into IO errors */
		if (zio->io_error == ECKSUM)
			zio->io_error = EIO;
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_cv);

	if (hash_lock) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		if (zio->io_error == 0 && hdr->b_state == arc.anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_state, ==, arc.anon);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}

/*
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.  Note that the `zio' parameter
 * in the callback will be NULL in this case, since no IO was
 * required.  If the block is not in the cache pass the read request
 * on to the spa with a substitute callback function, so that the
 * requested block will be added to the cache.
 *
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results); or, if this is a read with a "done" func, add a record
 * to the read to invoke the "done" func when the read completes,
 * and return; or just return.
 *
 * arc_read_done() will invoke all the requested "done" functions
 * for readers of this block.
 */
int
arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
    arc_done_func_t *done, void *private, int priority, int flags,
    uint32_t *arc_flags, zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	zio_t	*rzio;

top:
	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (hdr && hdr->b_datacnt > 0) {

		*arc_flags |= ARC_CACHED;

		if (HDR_IO_IN_PROGRESS(hdr)) {

			if (*arc_flags & ARC_WAIT) {
				cv_wait(&hdr->b_cv, hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			ASSERT(*arc_flags & ARC_NOWAIT);

			if (done) {
				arc_callback_t	*acb = NULL;

				acb = kmem_zalloc(sizeof (arc_callback_t),
				    KM_SLEEP);
				acb->acb_done = done;
				acb->acb_private = private;
				acb->acb_byteswap = swap;
				if (pio != NULL)
					acb->acb_zio_dummy = zio_null(pio,
					    spa, NULL, NULL, flags);

				ASSERT(acb->acb_done != NULL);
				acb->acb_next = hdr->b_acb;
				hdr->b_acb = acb;
				add_reference(hdr, hash_lock, private);
				mutex_exit(hash_lock);
				return (0);
			}
			mutex_exit(hash_lock);
			return (0);
		}

		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);

		if (done) {
			add_reference(hdr, hash_lock, private);
			/*
			 * If this block is already in use, create a new
			 * copy of the data so that we will be guaranteed
			 * that arc_release() will always succeed.
			 */
			buf = hdr->b_buf;
			ASSERT(buf);
			ASSERT(buf->b_data);
			if (HDR_BUF_AVAILABLE(hdr)) {
				ASSERT(buf->b_efunc == NULL);
				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
			} else {
				buf = arc_buf_clone(buf);
			}
		} else if (*arc_flags & ARC_PREFETCH &&
		    refcount_count(&hdr->b_refcnt) == 0) {
			hdr->b_flags |= ARC_PREFETCH;
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
		atomic_add_64(&arc.hits, 1);
		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t size = BP_GET_LSIZE(bp);
		arc_callback_t	*acb;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t	*exists;

			buf = arc_buf_alloc(spa, size, private);
			hdr = buf->b_hdr;
			hdr->b_dva = *BP_IDENTITY(bp);
			hdr->b_birth = bp->blk_birth;
			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
			exists = buf_hash_insert(hdr, &hash_lock);
			if (exists) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				bzero(&hdr->b_dva, sizeof (dva_t));
				hdr->b_birth = 0;
				hdr->b_cksum0 = 0;
				(void) arc_buf_remove_ref(buf, private);
				goto top; /* restart the IO request */
			}
			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH) {
				(void) remove_reference(hdr, hash_lock,
				    private);
				hdr->b_flags |= ARC_PREFETCH;
			}
			if (BP_GET_LEVEL(bp) > 0)
				hdr->b_flags |= ARC_INDIRECT;
		} else {
			/* this block is in the ghost cache */
			ASSERT(GHOST_STATE(hdr->b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
			ASSERT(hdr->b_buf == NULL);

			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH)
				hdr->b_flags |= ARC_PREFETCH;
			else
				add_reference(hdr, hash_lock, private);
			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
			buf->b_hdr = hdr;
			buf->b_data = NULL;
			buf->b_efunc = NULL;
			buf->b_private = NULL;
			buf->b_next = NULL;
			hdr->b_buf = buf;
			arc_get_data_buf(buf);
			ASSERT(hdr->b_datacnt == 0);
			hdr->b_datacnt = 1;

		}

		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
		acb->acb_done = done;
		acb->acb_private = private;
		acb->acb_byteswap = swap;

		ASSERT(hdr->b_acb == NULL);
		hdr->b_acb = acb;
		hdr->b_flags |= ARC_IO_IN_PROGRESS;

		/*
		 * If the buffer has been evicted, migrate it to a present state
		 * before issuing the I/O.  Once we drop the hash-table lock,
		 * the header will be marked as I/O in progress and have an
		 * attached buffer.  At this point, anybody who finds this
		 * buffer ought to notice that it's legit but has a pending I/O.
		 */

		if (GHOST_STATE(hdr->b_state))
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);

		ASSERT3U(hdr->b_size, ==, size);
		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
		    zbookmark_t *, zb);
		atomic_add_64(&arc.misses, 1);

		rzio = zio_read(pio, spa, bp, buf->b_data, size,
		    arc_read_done, buf, priority, flags, zb);

		if (*arc_flags & ARC_WAIT)
			return (zio_wait(rzio));

		ASSERT(*arc_flags & ARC_NOWAIT);
		zio_nowait(rzio);
	}
	return (0);
}
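
/*
 * A minimal synchronous-read sketch (hypothetical caller; byteswap and
 * bookmark setup elided).  With ARC_WAIT the call does not return until
 * the data is in the buffer, and arc_getbuf_func() above hands it back:
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	int error;
 *
 *	error = arc_read(NULL, spa, bp, NULL, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
 *	if (error == 0 && abuf != NULL) {
 *		... use abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 */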
/*
 * arc_read() variant to support pool traversal. If the block is already
 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
 * The idea is that we don't want pool traversal filling up memory, but
 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
 */
int
arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_mtx;
	int rc = 0;

	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);

	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
		arc_buf_t *buf = hdr->b_buf;

		ASSERT(buf);
		while (buf->b_data == NULL) {
			buf = buf->b_next;
			ASSERT(buf);
		}
		bcopy(buf->b_data, data, hdr->b_size);
	} else {
		rc = ENOENT;
	}

	if (hash_mtx)
		mutex_exit(hash_mtx);

	return (rc);
}
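/*
 * Example: the traversal caller pattern this supports, sketched with
 * hypothetical surrounding code. The caller supplies its own buffer
 * and only issues I/O itself when the ARC cannot satisfy the copy:
 *
 *	void *data = zio_buf_alloc(BP_GET_LSIZE(bp));
 *
 *	if (arc_tryread(spa, bp, data) == ENOENT) {
 *		... issue and wait for our own zio_read() into data ...
 *	}
 */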
void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
	ASSERT(buf->b_hdr != NULL);
	ASSERT(buf->b_hdr->b_state != arc.anon);
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
	buf->b_efunc = func;
	buf->b_private = private;
}

/*
 * This is used by the DMU to let the ARC know that a buffer is
 * being evicted, so the ARC should clean up. If this arc buf
 * is not yet in the evicted state, it will be put there.
 */
int
arc_buf_evict(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	arc_buf_t **bufp;

	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts().
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&arc_eviction_mtx);
		return (0);
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);

	if (buf->b_data == NULL) {
		/*
		 * We are on the eviction list.
		 */
		mutex_exit(hash_lock);
		mutex_enter(&arc_eviction_mtx);
		if (buf->b_hdr == NULL) {
			/*
			 * We are already in arc_do_user_evicts().
			 */
			mutex_exit(&arc_eviction_mtx);
			return (0);
		} else {
			arc_buf_t copy = *buf; /* structure assignment */
			/*
			 * Process this buffer now
			 * but let arc_do_user_evicts() do the reaping.
			 */
			buf->b_efunc = NULL;
			mutex_exit(&arc_eviction_mtx);
			VERIFY(copy.b_efunc(&copy) == 0);
			return (1);
		}
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);

	/*
	 * Pull this buffer off of the hdr
	 */
	bufp = &hdr->b_buf;
	while (*bufp != buf)
		bufp = &(*bufp)->b_next;
	*bufp = buf->b_next;

	ASSERT(buf->b_data != NULL);
	arc_buf_destroy(buf, FALSE, FALSE);

	if (hdr->b_datacnt == 0) {
		arc_state_t *old_state = hdr->b_state;
		arc_state_t *evicted_state;

		ASSERT(refcount_is_zero(&hdr->b_refcnt));

		evicted_state =
		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;

		mutex_enter(&old_state->mtx);
		mutex_enter(&evicted_state->mtx);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		hdr->b_flags = ARC_IN_HASH_TABLE;

		mutex_exit(&evicted_state->mtx);
		mutex_exit(&old_state->mtx);
	}
	mutex_exit(hash_lock);

	VERIFY(buf->b_efunc(buf) == 0);
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
	return (1);
}

/*
 * Release this buffer from the cache. This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_refcnt) > 0);

	if (hdr->b_state == arc.anon) {
		/* this buffer is already released */
		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
		ASSERT(BUF_EMPTY(hdr));
		ASSERT(buf->b_efunc == NULL);
		return;
	}

	mutex_enter(hash_lock);

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_buf != buf || buf->b_next != NULL) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		spa_t *spa = hdr->b_spa;

		ASSERT(hdr->b_datacnt > 1);
		/*
		 * Pull the data off of this buf and attach it to
		 * a new anonymous buf.
		 */
		(void) remove_reference(hdr, hash_lock, tag);
		bufp = &hdr->b_buf;
		while (*bufp != buf)
			bufp = &(*bufp)->b_next;
		*bufp = (*bufp)->b_next;

		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
		if (refcount_is_zero(&hdr->b_refcnt)) {
			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
		}
		hdr->b_datacnt -= 1;

		mutex_exit(hash_lock);

		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
		nhdr->b_size = blksz;
		nhdr->b_spa = spa;
		nhdr->b_buf = buf;
		nhdr->b_state = arc.anon;
		nhdr->b_arc_access = 0;
		nhdr->b_flags = 0;
		nhdr->b_datacnt = 1;
		buf->b_hdr = nhdr;
		buf->b_next = NULL;
		(void) refcount_add(&nhdr->b_refcnt, tag);
		atomic_add_64(&arc.anon->size, blksz);

		hdr = nhdr;
	} else {
		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
		ASSERT(!list_link_active(&hdr->b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		arc_change_state(arc.anon, hdr, hash_lock);
		hdr->b_arc_access = 0;
		mutex_exit(hash_lock);
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;
}

int
arc_released(arc_buf_t *buf)
{
	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
}

int
arc_has_callback(arc_buf_t *buf)
{
	return (buf->b_efunc != NULL);
}

#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
	return (refcount_count(&buf->b_hdr->b_refcnt));
}
#endif
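/*
 * Example: the intended release-before-modify sequence, sketched with
 * hypothetical callers. A consumer that wants to dirty a cached buffer
 * first detaches it into the anonymous state, so the cached copy is
 * never modified in place:
 *
 *	arc_release(buf, tag);
 *	ASSERT(arc_released(buf));
 *	... scribble on buf->b_data ...
 *	... later, arc_write() the anonymous buffer back out ...
 */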
static void
arc_write_done(zio_t *zio)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr;
	arc_callback_t *acb;

	buf = zio->io_private;
	hdr = buf->b_hdr;
	acb = hdr->b_acb;
	hdr->b_acb = NULL;
	ASSERT(acb != NULL);

	/* this buffer is on no lists and is not in the hash table */
	ASSERT3P(hdr->b_state, ==, arc.anon);

	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
	hdr->b_birth = zio->io_bp->blk_birth;
	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
	/*
	 * If the block to be written was all-zero, we may have
	 * compressed it away. In this case no write was performed
	 * so there will be no dva/birth-date/checksum. The buffer
	 * must therefore remain anonymous (and uncached).
	 */
	if (!BUF_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
			    BP_IDENTITY(zio->io_bp)));
			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
			    zio->io_bp->blk_birth);

			ASSERT(refcount_is_zero(&exists->b_refcnt));
			arc_change_state(arc.anon, exists, hash_lock);
			mutex_exit(hash_lock);
			arc_hdr_destroy(exists);
			exists = buf_hash_insert(hdr, &hash_lock);
			ASSERT3P(exists, ==, NULL);
		}
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else if (acb->acb_done == NULL) {
		int destroy_hdr;
		/*
		 * This is an anonymous buffer with no user callback;
		 * destroy it if there are no active references.
		 */
		mutex_enter(&arc_eviction_mtx);
		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	}

	if (acb->acb_done) {
		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
		acb->acb_done(zio, buf, acb->acb_private);
	}

	kmem_free(acb, sizeof (arc_callback_t));
}

int
arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
    uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
    arc_done_func_t *done, void *private, int priority, int flags,
    uint32_t arc_flags, zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_callback_t *acb;
	zio_t *rzio;

	/* this is a private buffer - no locking required */
	ASSERT3P(hdr->b_state, ==, arc.anon);
	ASSERT(BUF_EMPTY(hdr));
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
	ASSERT(hdr->b_acb == NULL);
	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
	acb->acb_done = done;
	acb->acb_private = private;
	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
	hdr->b_acb = acb;
	hdr->b_flags |= ARC_IO_IN_PROGRESS;
	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);

	if (arc_flags & ARC_WAIT)
		return (zio_wait(rzio));

	ASSERT(arc_flags & ARC_NOWAIT);
	zio_nowait(rzio);

	return (0);
}
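/*
 * Example: a minimal synchronous write of a released (anonymous)
 * buffer. Illustrative sketch only; the checksum, compression,
 * priority, and flag values are placeholders chosen by the caller.
 *
 *	zbookmark_t zb = { 0 };
 *	int error;
 *
 *	error = arc_write(NULL, spa, ZIO_CHECKSUM_FLETCHER_2,
 *	    ZIO_COMPRESS_LZJB, 1, txg, bp, buf, NULL, NULL,
 *	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, ARC_WAIT, &zb);
 *
 * On completion, arc_write_done() fills in the header's DVA/birth from
 * the resulting block pointer and (unless the write was compressed
 * away) inserts the header into the hash table.
 */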
int
arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	arc_buf_hdr_t *ab;
	kmutex_t *hash_lock;
	zio_t *zio;

	/*
	 * If this buffer is in the cache, release it, so it
	 * can be re-used.
	 */
	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (ab != NULL) {
		/*
		 * The checksum of blocks to free is not always
		 * preserved (e.g. on the deadlist). However, if it is
		 * nonzero, it should match what we have in the cache.
		 */
		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
		if (ab->b_state != arc.anon)
			arc_change_state(arc.anon, ab, hash_lock);
		if (HDR_IO_IN_PROGRESS(ab)) {
			/*
			 * This should only happen when we prefetch.
			 */
			ASSERT(ab->b_flags & ARC_PREFETCH);
			ASSERT3U(ab->b_datacnt, ==, 1);
			ab->b_flags |= ARC_FREED_IN_READ;
			if (HDR_IN_HASH_TABLE(ab))
				buf_hash_remove(ab);
			ab->b_arc_access = 0;
			bzero(&ab->b_dva, sizeof (dva_t));
			ab->b_birth = 0;
			ab->b_cksum0 = 0;
			ab->b_buf->b_efunc = NULL;
			ab->b_buf->b_private = NULL;
			mutex_exit(hash_lock);
		} else if (refcount_is_zero(&ab->b_refcnt)) {
			mutex_exit(hash_lock);
			arc_hdr_destroy(ab);
			atomic_add_64(&arc.deleted, 1);
		} else {
			/*
			 * We still have an active reference on this
			 * buffer. This can happen, e.g., from
			 * dbuf_unoverride().
			 */
			ASSERT(!HDR_IN_HASH_TABLE(ab));
			ab->b_arc_access = 0;
			bzero(&ab->b_dva, sizeof (dva_t));
			ab->b_birth = 0;
			ab->b_cksum0 = 0;
			ab->b_buf->b_efunc = NULL;
			ab->b_buf->b_private = NULL;
			mutex_exit(hash_lock);
		}
	}

	zio = zio_free(pio, spa, txg, bp, done, private);

	if (arc_flags & ARC_WAIT)
		return (zio_wait(zio));

	ASSERT(arc_flags & ARC_NOWAIT);
	zio_nowait(zio);

	return (0);
}

void
arc_tempreserve_clear(uint64_t tempreserve)
{
	atomic_add_64(&arc_tempreserve, -tempreserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}
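/*
 * Example: the reserve/clear protocol implied by arc_tempreserve_clear()
 * above and arc_tempreserve_space() below, sketched with a hypothetical
 * caller. A writer reserves space before dirtying buffers and releases
 * the reservation once the dirty data has been accounted for (or the
 * operation aborts):
 *
 *	error = arc_tempreserve_space(space);
 *	if (error == ERESTART || error == ENOMEM)
 *		return (error);		... caller backs off and retries ...
 *	... dirty up to "space" bytes of buffers ...
 *	arc_tempreserve_clear(space);
 */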
int
arc_tempreserve_space(uint64_t tempreserve)
{
#ifdef ZFS_DEBUG
	/*
	 * Once in a while, fail for no reason. Everything should cope.
	 */
	if (spa_get_random(10000) == 0) {
		dprintf("forcing random failure\n");
		return (ERESTART);
	}
#endif
	if (tempreserve > arc.c / 4 && !arc.no_grow)
		arc.c = MIN(arc.c_max, tempreserve * 4);
	if (tempreserve > arc.c)
		return (ENOMEM);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large. We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail. Not a huge deal.
	 *
	 * XXX The limit should be adjusted dynamically to keep the time
	 * to sync a dataset fixed (around 1-5 seconds?).
	 */
	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
	    arc_tempreserve + arc.anon->size > arc.c / 4) {
		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
		    "tempreserve=%lluK arc.c=%lluK\n",
		    arc_tempreserve>>10, arc.anon->lsize>>10,
		    tempreserve>>10, arc.c>>10);
		return (ERESTART);
	}
	atomic_add_64(&arc_tempreserve, tempreserve);
	return (0);
}

void
arc_init(void)
{
	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* Start out with 1/8 of all memory */
	arc.c = physmem * PAGESIZE / 8;

#ifdef _KERNEL
	/*
	 * On architectures where the physical memory can be larger
	 * than the addressable space (Intel in 32-bit mode), we may
	 * need to limit the cache to 1/8 of VM size.
	 */
	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
#endif

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc.c_min = MAX(arc.c / 4, 64<<20);
	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
	if (arc.c * 8 >= 1<<30)
		arc.c_max = (arc.c * 8) - (1<<30);
	else
		arc.c_max = arc.c_min;
	arc.c_max = MAX(arc.c * 6, arc.c_max);

	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable (i.e. over 64MB).
	 */
	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
		arc.c_max = zfs_arc_max;
	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc.c_max)
		arc.c_min = zfs_arc_min;

	arc.c = arc.c_max;
	arc.p = (arc.c >> 1);

	/* if kmem_flags are set, let's try to use less memory */
	if (kmem_debugging())
		arc.c = arc.c / 2;
	if (arc.c < arc.c_min)
		arc.c = arc.c_min;

	arc.anon = &ARC_anon;
	arc.mru = &ARC_mru;
	arc.mru_ghost = &ARC_mru_ghost;
	arc.mfu = &ARC_mfu;
	arc.mfu_ghost = &ARC_mfu_ghost;
	arc.size = 0;

	arc.hits = 0;
	arc.recycle_miss = 0;
	arc.evict_skip = 0;
	arc.mutex_miss = 0;

	mutex_init(&arc.anon->mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc.mru->mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc.mru_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc.mfu->mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc.mfu_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);

	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_arc_node));

	buf_init();

	arc_thread_exit = 0;
	arc_eviction_list = NULL;
	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
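/*
 * Worked example of the sizing logic above (illustrative numbers only):
 * on a machine with 4GB of physical memory, arc.c starts at 512MB
 * (1/8 of memory), so arc.c_min = MAX(512MB / 4, 64MB) = 128MB.
 * Since arc.c * 8 = 4GB >= 1GB, arc.c_max = 4GB - 1GB = 3GB, and
 * MAX(arc.c * 6, 3GB) leaves it at 3GB. Absent tunables, the cache
 * then begins life at arc.c = 3GB with arc.p = 1.5GB.
 */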
void
arc_fini(void)
{
	mutex_enter(&arc_reclaim_thr_lock);
	arc_thread_exit = 1;
	while (arc_thread_exit != 0)
		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
	mutex_exit(&arc_reclaim_thr_lock);

	arc_flush();

	arc_dead = TRUE;

	mutex_destroy(&arc_eviction_mtx);
	mutex_destroy(&arc_reclaim_lock);
	mutex_destroy(&arc_reclaim_thr_lock);
	cv_destroy(&arc_reclaim_thr_cv);

	list_destroy(&arc.mru->list);
	list_destroy(&arc.mru_ghost->list);
	list_destroy(&arc.mfu->list);
	list_destroy(&arc.mfu_ghost->list);

	mutex_destroy(&arc.anon->mtx);
	mutex_destroy(&arc.mru->mtx);
	mutex_destroy(&arc.mru_ghost->mtx);
	mutex_destroy(&arc.mfu->mtx);
	mutex_destroy(&arc.mfu_ghost->mtx);

	buf_fini();
}