xref: /titanic_52/usr/src/uts/common/fs/zfs/arc.c (revision 18c2aff776a775d34a4c9893a4c72e0434d68e36)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * DVA-based Adjustable Replacement Cache
30  *
31  * While much of the theory of operation used here is
32  * based on the self-tuning, low overhead replacement cache
33  * presented by Megiddo and Modha at FAST 2003, there are some
34  * significant differences:
35  *
36  * 1. The Megiddo and Modha model assumes any page is evictable.
37  * Pages in its cache cannot be "locked" into memory.  This makes
38  * the eviction algorithm simple: evict the last page in the list.
39  * This also makes the performance characteristics easy to reason
40  * about.  Our cache is not so simple.  At any given moment, some
41  * subset of the blocks in the cache are un-evictable because we
42  * have handed out a reference to them.  Blocks are only evictable
43  * when there are no external references active.  This makes
44  * eviction far more problematic:  we choose to evict the evictable
45  * blocks that are the "lowest" in the list.
46  *
47  * There are times when it is not possible to evict the requested
48  * space.  In these circumstances we are unable to adjust the cache
49  * size.  To prevent the cache from growing unbounded at these times we
50  * implement a "cache throttle" that slows the flow of new data
51  * into the cache until we can make space available.
52  *
53  * 2. The Megiddo and Modha model assumes a fixed cache size.
54  * Pages are evicted when the cache is full and there is a cache
55  * miss.  Our model has a variable sized cache.  It grows with
56  * high use, but also tries to react to memory pressure from the
57  * operating system: decreasing its size when system memory is
58  * tight.
59  *
60  * 3. The Megiddo and Modha model assumes a fixed page size. All
61  * elements of the cache are therefore exactly the same size.  So
62  * when adjusting the cache size following a cache miss, it's simply
63  * a matter of choosing a single page to evict.  In our model, we
64  * have variable-sized cache blocks (ranging from 512 bytes to
65  * 128K bytes).  We therefore choose a set of blocks to evict to make
66  * space for a cache miss that approximates as closely as possible
67  * the space used by the new block.
68  *
69  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70  * by N. Megiddo & D. Modha, FAST 2003
71  */
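/*
 * As an example of (3): a miss for a single 128K block might be satisfied
 * by evicting one 64K block and two 32K evictable blocks from the tail of
 * a list, since together they approximate the space the new block needs.
 */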
72 
73 /*
74  * The locking model:
75  *
76  * A new reference to a cache buffer can be obtained in two
77  * ways: 1) via a hash table lookup using the DVA as a key,
78  * or 2) via one of the ARC lists.  The arc_read() interface
79  * uses method 1, while the internal arc algorithms for
80  * adjusting the cache use method 2.  We therefore provide two
81  * types of locks: 1) the hash table lock array, and 2) the
82  * arc list locks.
83  *
84  * Buffers do not have their own mutexes; rather, they rely on the
85  * hash table mutexes for the bulk of their protection (i.e. most
86  * fields in the arc_buf_hdr_t are protected by these mutexes).
87  *
88  * buf_hash_find() returns the appropriate mutex (held) when it
89  * locates the requested buffer in the hash table.  It returns
90  * NULL for the mutex if the buffer was not in the table.
91  *
92  * buf_hash_remove() expects the appropriate hash mutex to be
93  * already held before it is invoked.
94  *
95  * Each arc state also has a mutex which is used to protect the
96  * buffer list associated with the state.  When attempting to
97  * obtain a hash table lock while holding an arc list lock, you
98  * must use mutex_tryenter() to avoid deadlock.  Also note that
99  * the active state mutex must be held before the ghost state mutex.
100  *
101  * Arc buffers may have an associated eviction callback function.
102  * This function will be invoked prior to removing the buffer (e.g.
103  * in arc_do_user_evicts()).  Note however that the data associated
104  * with the buffer may be evicted prior to the callback.  The callback
105  * must be made with *no locks held* (to prevent deadlock).  Additionally,
106  * the users of callbacks must ensure that their private data is
107  * protected from simultaneous callbacks from arc_buf_evict()
108  * and arc_do_user_evicts().
109  *
110  * Note that the majority of the performance stats are manipulated
111  * with atomic operations.
112  */
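/*
 * A sketch of the common lookup path described above (spa, dva, birth
 * and tag are assumed to be supplied by the caller):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(spa, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		add_reference(hdr, hash_lock, tag);
 *		arc_access(hdr, hash_lock);
 *		mutex_exit(hash_lock);
 *	}
 *
 * buf_hash_find() returns with the hash mutex held; add_reference()
 * makes the header un-evictable, and the caller drops the hash lock
 * when it has finished updating the header.
 */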
113 
114 #include <sys/spa.h>
115 #include <sys/zio.h>
116 #include <sys/zfs_context.h>
117 #include <sys/arc.h>
118 #include <sys/refcount.h>
119 #ifdef _KERNEL
120 #include <sys/vmsystm.h>
121 #include <vm/anon.h>
122 #include <sys/fs/swapnode.h>
123 #include <sys/dnlc.h>
124 #endif
125 #include <sys/callb.h>
126 
127 static kmutex_t		arc_reclaim_thr_lock;
128 static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
129 static uint8_t		arc_thread_exit;
130 
131 #define	ARC_REDUCE_DNLC_PERCENT	3
132 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
133 
134 typedef enum arc_reclaim_strategy {
135 	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
136 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
137 } arc_reclaim_strategy_t;
138 
139 /* number of seconds before growing cache again */
140 static int		arc_grow_retry = 60;
141 
142 /*
143  * minimum lifespan of a prefetch block in clock ticks
144  * (initialized in arc_init())
145  */
146 static int		arc_min_prefetch_lifespan;
147 
148 static kmutex_t arc_reclaim_lock;
149 static int arc_dead;
150 
151 /*
152  * These tunables are for performance analysis.
153  */
154 uint64_t zfs_arc_max;
155 uint64_t zfs_arc_min;
156 
157 /*
158  * Note that buffers can be in one of 5 states:
159  *	ARC_anon	- anonymous (discussed below)
160  *	ARC_mru		- recently used, currently cached
161  *	ARC_mru_ghost	- recently used, no longer in cache
162  *	ARC_mfu		- frequently used, currently cached
163  *	ARC_mfu_ghost	- frequently used, no longer in cache
164  * When there are no active references to a buffer, it is
165  * linked onto one of the lists in arc.  These are the
166  * only buffers that can be evicted or deleted.
167  *
168  * Anonymous buffers are buffers that are not associated with
169  * a DVA.  These are buffers that hold dirty block copies
170  * before they are written to stable storage.  By definition,
171  * they are "ref'd" and are considered part of arc_mru
172  * that cannot be freed.  Generally, they will acquire a DVA
173  * as they are written and migrate onto the arc_mru list.
174  */
175 
176 typedef struct arc_state {
177 	list_t	list;	/* linked list of evictable buffers in state */
178 	uint64_t lsize;	/* total size of buffers in the linked list */
179 	uint64_t size;	/* total size of all buffers in this state */
180 	uint64_t hits;
181 	kmutex_t mtx;
182 } arc_state_t;
183 
184 /* The 5 states: */
185 static arc_state_t ARC_anon;
186 static arc_state_t ARC_mru;
187 static arc_state_t ARC_mru_ghost;
188 static arc_state_t ARC_mfu;
189 static arc_state_t ARC_mfu_ghost;
190 
191 static struct arc {
192 	arc_state_t 	*anon;
193 	arc_state_t	*mru;
194 	arc_state_t	*mru_ghost;
195 	arc_state_t	*mfu;
196 	arc_state_t	*mfu_ghost;
197 	uint64_t	size;		/* Actual total arc size */
198 	uint64_t	p;		/* Target size (in bytes) of mru */
199 	uint64_t	c;		/* Target size of cache (in bytes) */
200 	uint64_t	c_min;		/* Minimum target cache size */
201 	uint64_t	c_max;		/* Maximum target cache size */
202 
203 	/* performance stats */
204 	uint64_t	hits;
205 	uint64_t	misses;
206 	uint64_t	deleted;
207 	uint64_t	recycle_miss;
208 	uint64_t	mutex_miss;
209 	uint64_t	evict_skip;
210 	uint64_t	hash_elements;
211 	uint64_t	hash_elements_max;
212 	uint64_t	hash_collisions;
213 	uint64_t	hash_chains;
214 	uint32_t	hash_chain_max;
215 
216 	int		no_grow;	/* Don't try to grow cache size */
217 } arc;
218 
219 static uint64_t arc_tempreserve;
220 
221 typedef struct arc_callback arc_callback_t;
222 
223 struct arc_callback {
224 	arc_done_func_t		*acb_done;
225 	void			*acb_private;
226 	arc_byteswap_func_t	*acb_byteswap;
227 	arc_buf_t		*acb_buf;
228 	zio_t			*acb_zio_dummy;
229 	arc_callback_t		*acb_next;
230 };
231 
232 struct arc_buf_hdr {
233 	/* immutable */
234 	uint64_t		b_size;
235 	spa_t			*b_spa;
236 
237 	/* protected by hash lock */
238 	dva_t			b_dva;
239 	uint64_t		b_birth;
240 	uint64_t		b_cksum0;
241 
242 	arc_buf_hdr_t		*b_hash_next;
243 	arc_buf_t		*b_buf;
244 	uint32_t		b_flags;
245 	uint32_t		b_datacnt;
246 
247 	kcondvar_t		b_cv;
248 	arc_callback_t		*b_acb;
249 
250 	/* protected by arc state mutex */
251 	arc_state_t		*b_state;
252 	list_node_t		b_arc_node;
253 
254 	/* updated atomically */
255 	clock_t			b_arc_access;
256 
257 	/* self protecting */
258 	refcount_t		b_refcnt;
259 };
260 
261 static arc_buf_t *arc_eviction_list;
262 static kmutex_t arc_eviction_mtx;
263 static arc_buf_hdr_t arc_eviction_hdr;
264 static void arc_get_data_buf(arc_buf_t *buf);
265 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
266 
267 #define	GHOST_STATE(state)	\
268 	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)
269 
270 /*
271  * Private ARC flags.  These flags are private, ARC-only flags that will show up
272  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
273  * be passed in as arc_flags in things like arc_read.  However, these flags
274  * should never be passed in and should only be set by ARC code.  When adding new
275  * public flags, make sure not to smash the private ones.
276  */
277 
278 #define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
279 #define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
280 #define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
281 #define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
282 #define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
283 #define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
284 
285 #define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
286 #define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
287 #define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
288 #define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
289 #define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
290 
291 /*
292  * Hash table routines
293  */
294 
295 #define	HT_LOCK_PAD	64
296 
297 struct ht_lock {
298 	kmutex_t	ht_lock;
299 #ifdef _KERNEL
300 	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
301 #endif
302 };
303 
304 #define	BUF_LOCKS 256
305 typedef struct buf_hash_table {
306 	uint64_t ht_mask;
307 	arc_buf_hdr_t **ht_table;
308 	struct ht_lock ht_locks[BUF_LOCKS];
309 } buf_hash_table_t;
310 
311 static buf_hash_table_t buf_hash_table;
312 
313 #define	BUF_HASH_INDEX(spa, dva, birth) \
314 	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
315 #define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
316 #define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
317 #define	HDR_LOCK(buf) \
318 	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
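/*
 * When a header is already in hand (rather than being looked up by DVA),
 * its hash mutex can be taken directly, for example:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(hdr);
 *
 *	mutex_enter(hash_lock);
 *	...
 *	mutex_exit(hash_lock);
 *
 * Note that BUF_HASH_LOCK_NTRY() folds the hash index into one of the
 * BUF_LOCKS pad-aligned mutexes, so unrelated headers may share a lock.
 */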
319 
320 uint64_t zfs_crc64_table[256];
321 
322 static uint64_t
323 buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
324 {
325 	uintptr_t spav = (uintptr_t)spa;
326 	uint8_t *vdva = (uint8_t *)dva;
327 	uint64_t crc = -1ULL;
328 	int i;
329 
330 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
331 
332 	for (i = 0; i < sizeof (dva_t); i++)
333 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
334 
335 	crc ^= (spav>>8) ^ birth;
336 
337 	return (crc);
338 }
339 
340 #define	BUF_EMPTY(buf)						\
341 	((buf)->b_dva.dva_word[0] == 0 &&			\
342 	(buf)->b_dva.dva_word[1] == 0 &&			\
343 	(buf)->b_birth == 0)
344 
345 #define	BUF_EQUAL(spa, dva, birth, buf)				\
346 	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
347 	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
348 	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
349 
350 static arc_buf_hdr_t *
351 buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
352 {
353 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
354 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
355 	arc_buf_hdr_t *buf;
356 
357 	mutex_enter(hash_lock);
358 	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
359 	    buf = buf->b_hash_next) {
360 		if (BUF_EQUAL(spa, dva, birth, buf)) {
361 			*lockp = hash_lock;
362 			return (buf);
363 		}
364 	}
365 	mutex_exit(hash_lock);
366 	*lockp = NULL;
367 	return (NULL);
368 }
369 
370 /*
371  * Insert an entry into the hash table.  If there is already an element
372  * equal to elem in the hash table, then the already existing element
373  * will be returned and the new element will not be inserted.
374  * Otherwise returns NULL.
375  */
376 static arc_buf_hdr_t *
377 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
378 {
379 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
380 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
381 	arc_buf_hdr_t *fbuf;
382 	uint32_t max, i;
383 
384 	ASSERT(!HDR_IN_HASH_TABLE(buf));
385 	*lockp = hash_lock;
386 	mutex_enter(hash_lock);
387 	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
388 	    fbuf = fbuf->b_hash_next, i++) {
389 		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
390 			return (fbuf);
391 	}
392 
393 	buf->b_hash_next = buf_hash_table.ht_table[idx];
394 	buf_hash_table.ht_table[idx] = buf;
395 	buf->b_flags |= ARC_IN_HASH_TABLE;
396 
397 	/* collect some hash table performance data */
398 	if (i > 0) {
399 		atomic_add_64(&arc.hash_collisions, 1);
400 		if (i == 1)
401 			atomic_add_64(&arc.hash_chains, 1);
402 	}
403 	while (i > (max = arc.hash_chain_max) &&
404 	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
405 		continue;
406 	}
407 	atomic_add_64(&arc.hash_elements, 1);
408 	if (arc.hash_elements > arc.hash_elements_max)
409 		atomic_add_64(&arc.hash_elements_max, 1);
410 
411 	return (NULL);
412 }
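/*
 * A typical insert, as done by the read path below, checks for a racing
 * insert of the same block (sketch; hdr is a newly initialized header):
 *
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		mutex_exit(hash_lock);
 *		...
 *	}
 *
 * In both cases the hash mutex is returned held via *lockp; on success
 * the header is now in the table with ARC_IN_HASH_TABLE set.
 */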
413 
414 static void
415 buf_hash_remove(arc_buf_hdr_t *buf)
416 {
417 	arc_buf_hdr_t *fbuf, **bufp;
418 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
419 
420 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
421 	ASSERT(HDR_IN_HASH_TABLE(buf));
422 
423 	bufp = &buf_hash_table.ht_table[idx];
424 	while ((fbuf = *bufp) != buf) {
425 		ASSERT(fbuf != NULL);
426 		bufp = &fbuf->b_hash_next;
427 	}
428 	*bufp = buf->b_hash_next;
429 	buf->b_hash_next = NULL;
430 	buf->b_flags &= ~ARC_IN_HASH_TABLE;
431 
432 	/* collect some hash table performance data */
433 	atomic_add_64(&arc.hash_elements, -1);
434 	if (buf_hash_table.ht_table[idx] &&
435 	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
436 		atomic_add_64(&arc.hash_chains, -1);
437 }
438 
439 /*
440  * Global data structures and functions for the buf kmem cache.
441  */
442 static kmem_cache_t *hdr_cache;
443 static kmem_cache_t *buf_cache;
444 
445 static void
446 buf_fini(void)
447 {
448 	int i;
449 
450 	kmem_free(buf_hash_table.ht_table,
451 	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
452 	for (i = 0; i < BUF_LOCKS; i++)
453 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
454 	kmem_cache_destroy(hdr_cache);
455 	kmem_cache_destroy(buf_cache);
456 }
457 
458 /*
459  * Constructor callback - called when the cache is empty
460  * and a new buf is requested.
461  */
462 /* ARGSUSED */
463 static int
464 hdr_cons(void *vbuf, void *unused, int kmflag)
465 {
466 	arc_buf_hdr_t *buf = vbuf;
467 
468 	bzero(buf, sizeof (arc_buf_hdr_t));
469 	refcount_create(&buf->b_refcnt);
470 	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
471 	return (0);
472 }
473 
474 /*
475  * Destructor callback - called when a cached buf is
476  * no longer required.
477  */
478 /* ARGSUSED */
479 static void
480 hdr_dest(void *vbuf, void *unused)
481 {
482 	arc_buf_hdr_t *buf = vbuf;
483 
484 	refcount_destroy(&buf->b_refcnt);
485 	cv_destroy(&buf->b_cv);
486 }
487 
488 static int arc_reclaim_needed(void);
489 void arc_kmem_reclaim(void);
490 
491 /*
492  * Reclaim callback -- invoked when memory is low.
493  */
494 /* ARGSUSED */
495 static void
496 hdr_recl(void *unused)
497 {
498 	dprintf("hdr_recl called\n");
499 	if (arc_reclaim_needed())
500 		arc_kmem_reclaim();
501 }
502 
503 static void
504 buf_init(void)
505 {
506 	uint64_t *ct;
507 	uint64_t hsize = 1ULL << 12;
508 	int i, j;
509 
510 	/*
511 	 * The hash table is big enough to fill all of physical memory
512 	 * with an average 64K block size.  The table will take up
513 	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
514 	 */
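	/*
	 * For example, with 8GB of physical memory and 8-byte pointers,
	 * hsize grows until hsize * 65536 >= 8GB, i.e. to 2^17 (131072)
	 * buckets, and the table of bucket pointers then occupies
	 * 131072 * 8 bytes = 1MB -- the 128KB/GB figure quoted above.
	 */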
515 	while (hsize * 65536 < physmem * PAGESIZE)
516 		hsize <<= 1;
517 retry:
518 	buf_hash_table.ht_mask = hsize - 1;
519 	buf_hash_table.ht_table =
520 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
521 	if (buf_hash_table.ht_table == NULL) {
522 		ASSERT(hsize > (1ULL << 8));
523 		hsize >>= 1;
524 		goto retry;
525 	}
526 
527 	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
528 	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
529 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
530 	    0, NULL, NULL, NULL, NULL, NULL, 0);
531 
532 	for (i = 0; i < 256; i++)
533 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
534 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
535 
536 	for (i = 0; i < BUF_LOCKS; i++) {
537 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
538 		    NULL, MUTEX_DEFAULT, NULL);
539 	}
540 }
541 
542 #define	ARC_MINTIME	(hz>>4) /* 62 ms */
543 
544 static void
545 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
546 {
547 	ASSERT(MUTEX_HELD(hash_lock));
548 
549 	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
550 	    (ab->b_state != arc.anon)) {
551 		int delta = ab->b_size * ab->b_datacnt;
552 
553 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
554 		mutex_enter(&ab->b_state->mtx);
555 		ASSERT(list_link_active(&ab->b_arc_node));
556 		list_remove(&ab->b_state->list, ab);
557 		if (GHOST_STATE(ab->b_state)) {
558 			ASSERT3U(ab->b_datacnt, ==, 0);
559 			ASSERT3P(ab->b_buf, ==, NULL);
560 			delta = ab->b_size;
561 		}
562 		ASSERT(delta > 0);
563 		ASSERT3U(ab->b_state->lsize, >=, delta);
564 		atomic_add_64(&ab->b_state->lsize, -delta);
565 		mutex_exit(&ab->b_state->mtx);
566 		/* remove the prefetch flag if we get a reference */
567 		if (ab->b_flags & ARC_PREFETCH)
568 			ab->b_flags &= ~ARC_PREFETCH;
569 	}
570 }
571 
572 static int
573 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
574 {
575 	int cnt;
576 
577 	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
578 	ASSERT(!GHOST_STATE(ab->b_state));
579 
580 	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
581 	    (ab->b_state != arc.anon)) {
582 
583 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
584 		mutex_enter(&ab->b_state->mtx);
585 		ASSERT(!list_link_active(&ab->b_arc_node));
586 		list_insert_head(&ab->b_state->list, ab);
587 		ASSERT(ab->b_datacnt > 0);
588 		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
589 		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
590 		mutex_exit(&ab->b_state->mtx);
591 	}
592 	return (cnt);
593 }
594 
595 /*
596  * Move the supplied buffer to the indicated state.  The hash lock
597  * for the buffer must be held by the caller.
598  */
599 static void
600 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
601 {
602 	arc_state_t *old_state = ab->b_state;
603 	int refcnt = refcount_count(&ab->b_refcnt);
604 	int from_delta, to_delta;
605 
606 	ASSERT(MUTEX_HELD(hash_lock));
607 	ASSERT(new_state != old_state);
608 	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
609 	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
610 
611 	from_delta = to_delta = ab->b_datacnt * ab->b_size;
612 
613 	/*
614 	 * If this buffer is evictable, transfer it from the
615 	 * old state list to the new state list.
616 	 */
617 	if (refcnt == 0) {
618 		if (old_state != arc.anon) {
619 			int use_mutex = !MUTEX_HELD(&old_state->mtx);
620 
621 			if (use_mutex)
622 				mutex_enter(&old_state->mtx);
623 
624 			ASSERT(list_link_active(&ab->b_arc_node));
625 			list_remove(&old_state->list, ab);
626 
627 			/*
628 			 * If prefetching out of the ghost cache,
629 			 * we will have a nonzero datacnt.
630 			 */
631 			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
632 				/* ghost elements have a ghost size */
633 				ASSERT(ab->b_buf == NULL);
634 				from_delta = ab->b_size;
635 			}
636 			ASSERT3U(old_state->lsize, >=, from_delta);
637 			atomic_add_64(&old_state->lsize, -from_delta);
638 
639 			if (use_mutex)
640 				mutex_exit(&old_state->mtx);
641 		}
642 		if (new_state != arc.anon) {
643 			int use_mutex = !MUTEX_HELD(&new_state->mtx);
644 
645 			if (use_mutex)
646 				mutex_enter(&new_state->mtx);
647 
648 			list_insert_head(&new_state->list, ab);
649 
650 			/* ghost elements have a ghost size */
651 			if (GHOST_STATE(new_state)) {
652 				ASSERT(ab->b_datacnt == 0);
653 				ASSERT(ab->b_buf == NULL);
654 				to_delta = ab->b_size;
655 			}
656 			atomic_add_64(&new_state->lsize, to_delta);
657 			ASSERT3U(new_state->size + to_delta, >=,
658 			    new_state->lsize);
659 
660 			if (use_mutex)
661 				mutex_exit(&new_state->mtx);
662 		}
663 	}
664 
665 	ASSERT(!BUF_EMPTY(ab));
666 	if (new_state == arc.anon && old_state != arc.anon) {
667 		buf_hash_remove(ab);
668 	}
669 
670 	/* adjust state sizes */
671 	if (to_delta)
672 		atomic_add_64(&new_state->size, to_delta);
673 	if (from_delta) {
674 		ASSERT3U(old_state->size, >=, from_delta);
675 		atomic_add_64(&old_state->size, -from_delta);
676 	}
677 	ab->b_state = new_state;
678 }
679 
680 arc_buf_t *
681 arc_buf_alloc(spa_t *spa, int size, void *tag)
682 {
683 	arc_buf_hdr_t *hdr;
684 	arc_buf_t *buf;
685 
686 	ASSERT3U(size, >, 0);
687 	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
688 	ASSERT(BUF_EMPTY(hdr));
689 	hdr->b_size = size;
690 	hdr->b_spa = spa;
691 	hdr->b_state = arc.anon;
692 	hdr->b_arc_access = 0;
693 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
694 	buf->b_hdr = hdr;
695 	buf->b_data = NULL;
696 	buf->b_efunc = NULL;
697 	buf->b_private = NULL;
698 	buf->b_next = NULL;
699 	hdr->b_buf = buf;
700 	arc_get_data_buf(buf);
701 	hdr->b_datacnt = 1;
702 	hdr->b_flags = 0;
703 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
704 	(void) refcount_add(&hdr->b_refcnt, tag);
705 
706 	return (buf);
707 }
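/*
 * A minimal sketch of allocating and releasing an anonymous buffer
 * (spa, size and tag are assumed to be supplied by the caller):
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, tag);
 *	...fill in buf->b_data...
 *	arc_buf_free(buf, tag);
 *
 * The new buffer starts out referenced and in the arc.anon state; it
 * only acquires a DVA (and a hash table entry) once it is written out.
 */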
708 
709 static arc_buf_t *
710 arc_buf_clone(arc_buf_t *from)
711 {
712 	arc_buf_t *buf;
713 	arc_buf_hdr_t *hdr = from->b_hdr;
714 	uint64_t size = hdr->b_size;
715 
716 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
717 	buf->b_hdr = hdr;
718 	buf->b_data = NULL;
719 	buf->b_efunc = NULL;
720 	buf->b_private = NULL;
721 	buf->b_next = hdr->b_buf;
722 	hdr->b_buf = buf;
723 	arc_get_data_buf(buf);
724 	bcopy(from->b_data, buf->b_data, size);
725 	hdr->b_datacnt += 1;
726 	return (buf);
727 }
728 
729 void
730 arc_buf_add_ref(arc_buf_t *buf, void* tag)
731 {
732 	arc_buf_hdr_t *hdr;
733 	kmutex_t *hash_lock;
734 
735 	/*
736 	 * Check to see if this buffer is currently being evicted via
737 	 * arc_do_user_evicts().
738 	 */
739 	mutex_enter(&arc_eviction_mtx);
740 	hdr = buf->b_hdr;
741 	if (hdr == NULL) {
742 		mutex_exit(&arc_eviction_mtx);
743 		return;
744 	}
745 	hash_lock = HDR_LOCK(hdr);
746 	mutex_exit(&arc_eviction_mtx);
747 
748 	mutex_enter(hash_lock);
749 	if (buf->b_data == NULL) {
750 		/*
751 		 * This buffer is evicted.
752 		 */
753 		mutex_exit(hash_lock);
754 		return;
755 	}
756 
757 	ASSERT(buf->b_hdr == hdr);
758 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
759 	add_reference(hdr, hash_lock, tag);
760 	arc_access(hdr, hash_lock);
761 	mutex_exit(hash_lock);
762 	atomic_add_64(&arc.hits, 1);
763 }
764 
765 static void
766 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
767 {
768 	arc_buf_t **bufp;
769 
770 	/* free up data associated with the buf */
771 	if (buf->b_data) {
772 		arc_state_t *state = buf->b_hdr->b_state;
773 		uint64_t size = buf->b_hdr->b_size;
774 
775 		if (!recycle) {
776 			zio_buf_free(buf->b_data, size);
777 			atomic_add_64(&arc.size, -size);
778 		}
779 		if (list_link_active(&buf->b_hdr->b_arc_node)) {
780 			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
781 			ASSERT(state != arc.anon);
782 			ASSERT3U(state->lsize, >=, size);
783 			atomic_add_64(&state->lsize, -size);
784 		}
785 		ASSERT3U(state->size, >=, size);
786 		atomic_add_64(&state->size, -size);
787 		buf->b_data = NULL;
788 		ASSERT(buf->b_hdr->b_datacnt > 0);
789 		buf->b_hdr->b_datacnt -= 1;
790 	}
791 
792 	/* only remove the buf if requested */
793 	if (!all)
794 		return;
795 
796 	/* remove the buf from the hdr list */
797 	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
798 		continue;
799 	*bufp = buf->b_next;
800 
801 	ASSERT(buf->b_efunc == NULL);
802 
803 	/* clean up the buf */
804 	buf->b_hdr = NULL;
805 	kmem_cache_free(buf_cache, buf);
806 }
807 
808 static void
809 arc_hdr_destroy(arc_buf_hdr_t *hdr)
810 {
811 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
812 	ASSERT3P(hdr->b_state, ==, arc.anon);
813 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
814 
815 	if (!BUF_EMPTY(hdr)) {
816 		ASSERT(!HDR_IN_HASH_TABLE(hdr));
817 		bzero(&hdr->b_dva, sizeof (dva_t));
818 		hdr->b_birth = 0;
819 		hdr->b_cksum0 = 0;
820 	}
821 	while (hdr->b_buf) {
822 		arc_buf_t *buf = hdr->b_buf;
823 
824 		if (buf->b_efunc) {
825 			mutex_enter(&arc_eviction_mtx);
826 			ASSERT(buf->b_hdr != NULL);
827 			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
828 			hdr->b_buf = buf->b_next;
829 			buf->b_hdr = &arc_eviction_hdr;
830 			buf->b_next = arc_eviction_list;
831 			arc_eviction_list = buf;
832 			mutex_exit(&arc_eviction_mtx);
833 		} else {
834 			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
835 		}
836 	}
837 
838 	ASSERT(!list_link_active(&hdr->b_arc_node));
839 	ASSERT3P(hdr->b_hash_next, ==, NULL);
840 	ASSERT3P(hdr->b_acb, ==, NULL);
841 	kmem_cache_free(hdr_cache, hdr);
842 }
843 
844 void
845 arc_buf_free(arc_buf_t *buf, void *tag)
846 {
847 	arc_buf_hdr_t *hdr = buf->b_hdr;
848 	int hashed = hdr->b_state != arc.anon;
849 
850 	ASSERT(buf->b_efunc == NULL);
851 	ASSERT(buf->b_data != NULL);
852 
853 	if (hashed) {
854 		kmutex_t *hash_lock = HDR_LOCK(hdr);
855 
856 		mutex_enter(hash_lock);
857 		(void) remove_reference(hdr, hash_lock, tag);
858 		if (hdr->b_datacnt > 1)
859 			arc_buf_destroy(buf, FALSE, TRUE);
860 		else
861 			hdr->b_flags |= ARC_BUF_AVAILABLE;
862 		mutex_exit(hash_lock);
863 	} else if (HDR_IO_IN_PROGRESS(hdr)) {
864 		int destroy_hdr;
865 		/*
866 		 * We are in the middle of an async write.  Don't destroy
867 		 * this buffer unless the write completes before we finish
868 		 * decrementing the reference count.
869 		 */
870 		mutex_enter(&arc_eviction_mtx);
871 		(void) remove_reference(hdr, NULL, tag);
872 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
873 		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
874 		mutex_exit(&arc_eviction_mtx);
875 		if (destroy_hdr)
876 			arc_hdr_destroy(hdr);
877 	} else {
878 		if (remove_reference(hdr, NULL, tag) > 0) {
879 			ASSERT(HDR_IO_ERROR(hdr));
880 			arc_buf_destroy(buf, FALSE, TRUE);
881 		} else {
882 			arc_hdr_destroy(hdr);
883 		}
884 	}
885 }
886 
887 int
888 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
889 {
890 	arc_buf_hdr_t *hdr = buf->b_hdr;
891 	kmutex_t *hash_lock = HDR_LOCK(hdr);
892 	int no_callback = (buf->b_efunc == NULL);
893 
894 	if (hdr->b_state == arc.anon) {
895 		arc_buf_free(buf, tag);
896 		return (no_callback);
897 	}
898 
899 	mutex_enter(hash_lock);
900 	ASSERT(hdr->b_state != arc.anon);
901 	ASSERT(buf->b_data != NULL);
902 
903 	(void) remove_reference(hdr, hash_lock, tag);
904 	if (hdr->b_datacnt > 1) {
905 		if (no_callback)
906 			arc_buf_destroy(buf, FALSE, TRUE);
907 	} else if (no_callback) {
908 		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
909 		hdr->b_flags |= ARC_BUF_AVAILABLE;
910 	}
911 	ASSERT(no_callback || hdr->b_datacnt > 1 ||
912 	    refcount_is_zero(&hdr->b_refcnt));
913 	mutex_exit(hash_lock);
914 	return (no_callback);
915 }
916 
917 int
918 arc_buf_size(arc_buf_t *buf)
919 {
920 	return (buf->b_hdr->b_size);
921 }
922 
923 /*
924  * Evict buffers from the list until we've removed the specified number of
925  * bytes.  Move the removed buffers to the appropriate evicted (ghost) state.
926  * If the recycle flag is set, then attempt to "recycle" a buffer:
927  * - look for a buffer to evict that is `bytes' long.
928  * - return the data block from this buffer rather than freeing it.
929  * This flag is used by callers that are trying to make space for a
930  * new buffer in a full arc cache.
931  */
932 static void *
933 arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle)
934 {
935 	arc_state_t *evicted_state;
936 	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
937 	arc_buf_hdr_t *ab, *ab_prev;
938 	kmutex_t *hash_lock;
939 	boolean_t have_lock;
940 	void *steal = NULL;
941 
942 	ASSERT(state == arc.mru || state == arc.mfu);
943 
944 	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
945 
946 	mutex_enter(&state->mtx);
947 	mutex_enter(&evicted_state->mtx);
948 
949 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
950 		ab_prev = list_prev(&state->list, ab);
951 		/* prefetch buffers have a minimum lifespan */
952 		if (HDR_IO_IN_PROGRESS(ab) ||
953 		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
954 		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
955 			skipped++;
956 			continue;
957 		}
958 		if (recycle && (ab->b_size != bytes || ab->b_datacnt > 1))
959 			continue;
960 		hash_lock = HDR_LOCK(ab);
961 		have_lock = MUTEX_HELD(hash_lock);
962 		if (have_lock || mutex_tryenter(hash_lock)) {
963 			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
964 			ASSERT(ab->b_datacnt > 0);
965 			while (ab->b_buf) {
966 				arc_buf_t *buf = ab->b_buf;
967 				if (buf->b_data) {
968 					bytes_evicted += ab->b_size;
969 					if (recycle)
970 						steal = buf->b_data;
971 				}
972 				if (buf->b_efunc) {
973 					mutex_enter(&arc_eviction_mtx);
974 					arc_buf_destroy(buf, recycle, FALSE);
975 					ab->b_buf = buf->b_next;
976 					buf->b_hdr = &arc_eviction_hdr;
977 					buf->b_next = arc_eviction_list;
978 					arc_eviction_list = buf;
979 					mutex_exit(&arc_eviction_mtx);
980 				} else {
981 					arc_buf_destroy(buf, recycle, TRUE);
982 				}
983 			}
984 			ASSERT(ab->b_datacnt == 0);
985 			arc_change_state(evicted_state, ab, hash_lock);
986 			ASSERT(HDR_IN_HASH_TABLE(ab));
987 			ab->b_flags = ARC_IN_HASH_TABLE;
988 			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
989 			if (!have_lock)
990 				mutex_exit(hash_lock);
991 			if (bytes >= 0 && bytes_evicted >= bytes)
992 				break;
993 		} else {
994 			missed += 1;
995 		}
996 	}
997 	mutex_exit(&evicted_state->mtx);
998 	mutex_exit(&state->mtx);
999 
1000 	if (bytes_evicted < bytes)
1001 		dprintf("only evicted %lld bytes from %x",
1002 		    (longlong_t)bytes_evicted, state);
1003 
1004 	if (skipped)
1005 		atomic_add_64(&arc.evict_skip, skipped);
1006 	if (missed)
1007 		atomic_add_64(&arc.mutex_miss, missed);
1008 	return (steal);
1009 }
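/*
 * The recycle path is used by arc_get_data_buf() when the cache is full:
 * it asks arc_evict() for a victim of exactly the right size and reuses
 * that victim's data block directly, falling back to a normal eviction
 * plus a fresh allocation if no suitable victim is found (sketch):
 *
 *	if ((buf->b_data = arc_evict(state, size, TRUE)) == NULL) {
 *		(void) arc_evict(state, size, FALSE);
 *		buf->b_data = zio_buf_alloc(size);
 *	}
 */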
1010 
1011 /*
1012  * Remove buffers from the list until we've removed the specified number of
1013  * bytes.  Destroy the buffers that are removed.
1014  */
1015 static void
1016 arc_evict_ghost(arc_state_t *state, int64_t bytes)
1017 {
1018 	arc_buf_hdr_t *ab, *ab_prev;
1019 	kmutex_t *hash_lock;
1020 	uint64_t bytes_deleted = 0;
1021 	uint_t bufs_skipped = 0;
1022 
1023 	ASSERT(GHOST_STATE(state));
1024 top:
1025 	mutex_enter(&state->mtx);
1026 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
1027 		ab_prev = list_prev(&state->list, ab);
1028 		hash_lock = HDR_LOCK(ab);
1029 		if (mutex_tryenter(hash_lock)) {
1030 			ASSERT(!HDR_IO_IN_PROGRESS(ab));
1031 			ASSERT(ab->b_buf == NULL);
1032 			arc_change_state(arc.anon, ab, hash_lock);
1033 			mutex_exit(hash_lock);
1034 			atomic_add_64(&arc.deleted, 1);
1035 			bytes_deleted += ab->b_size;
1036 			arc_hdr_destroy(ab);
1037 			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1038 			if (bytes >= 0 && bytes_deleted >= bytes)
1039 				break;
1040 		} else {
1041 			if (bytes < 0) {
1042 				mutex_exit(&state->mtx);
1043 				mutex_enter(hash_lock);
1044 				mutex_exit(hash_lock);
1045 				goto top;
1046 			}
1047 			bufs_skipped += 1;
1048 		}
1049 	}
1050 	mutex_exit(&state->mtx);
1051 
1052 	if (bufs_skipped) {
1053 		atomic_add_64(&arc.mutex_miss, bufs_skipped);
1054 		ASSERT(bytes >= 0);
1055 	}
1056 
1057 	if (bytes_deleted < bytes)
1058 		dprintf("only deleted %lld bytes from %p",
1059 		    (longlong_t)bytes_deleted, state);
1060 }
1061 
1062 static void
1063 arc_adjust(void)
1064 {
1065 	int64_t top_sz, mru_over, arc_over;
1066 
1067 	top_sz = arc.anon->size + arc.mru->size;
1068 
1069 	if (top_sz > arc.p && arc.mru->lsize > 0) {
1070 		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
1071 		(void) arc_evict(arc.mru, toevict, FALSE);
1072 		top_sz = arc.anon->size + arc.mru->size;
1073 	}
1074 
1075 	mru_over = top_sz + arc.mru_ghost->size - arc.c;
1076 
1077 	if (mru_over > 0) {
1078 		if (arc.mru_ghost->lsize > 0) {
1079 			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
1080 			arc_evict_ghost(arc.mru_ghost, todelete);
1081 		}
1082 	}
1083 
1084 	if ((arc_over = arc.size - arc.c) > 0) {
1085 		int64_t tbl_over;
1086 
1087 		if (arc.mfu->lsize > 0) {
1088 			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
1089 			(void) arc_evict(arc.mfu, toevict, FALSE);
1090 		}
1091 
1092 		tbl_over = arc.size + arc.mru_ghost->lsize +
1093 		    arc.mfu_ghost->lsize - arc.c*2;
1094 
1095 		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
1096 			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
1097 			arc_evict_ghost(arc.mfu_ghost, todelete);
1098 		}
1099 	}
1100 }
1101 
1102 static void
1103 arc_do_user_evicts(void)
1104 {
1105 	mutex_enter(&arc_eviction_mtx);
1106 	while (arc_eviction_list != NULL) {
1107 		arc_buf_t *buf = arc_eviction_list;
1108 		arc_eviction_list = buf->b_next;
1109 		buf->b_hdr = NULL;
1110 		mutex_exit(&arc_eviction_mtx);
1111 
1112 		if (buf->b_efunc != NULL)
1113 			VERIFY(buf->b_efunc(buf) == 0);
1114 
1115 		buf->b_efunc = NULL;
1116 		buf->b_private = NULL;
1117 		kmem_cache_free(buf_cache, buf);
1118 		mutex_enter(&arc_eviction_mtx);
1119 	}
1120 	mutex_exit(&arc_eviction_mtx);
1121 }
1122 
1123 /*
1124  * Flush all *evictable* data from the cache.
1125  * NOTE: this will not touch "active" (i.e. referenced) data.
1126  */
1127 void
1128 arc_flush(void)
1129 {
1130 	while (list_head(&arc.mru->list))
1131 		(void) arc_evict(arc.mru, -1, FALSE);
1132 	while (list_head(&arc.mfu->list))
1133 		(void) arc_evict(arc.mfu, -1, FALSE);
1134 
1135 	arc_evict_ghost(arc.mru_ghost, -1);
1136 	arc_evict_ghost(arc.mfu_ghost, -1);
1137 
1138 	mutex_enter(&arc_reclaim_thr_lock);
1139 	arc_do_user_evicts();
1140 	mutex_exit(&arc_reclaim_thr_lock);
1141 	ASSERT(arc_eviction_list == NULL);
1142 }
1143 
1144 int arc_kmem_reclaim_shift = 5;		/* log2(fraction of arc to reclaim) */
1145 
1146 void
1147 arc_kmem_reclaim(void)
1148 {
1149 	uint64_t to_free;
1150 
1151 	/*
1152 	 * We need arc_reclaim_lock because we don't want multiple
1153 	 * threads trying to reclaim concurrently.
1154 	 */
1155 
1156 	/*
1157 	 * umem calls the reclaim func when we destroy the buf cache,
1158 	 * which is after we do arc_fini().  So we set a flag to prevent
1159 	 * accessing the destroyed mutexes and lists.
1160 	 */
1161 	if (arc_dead)
1162 		return;
1163 
1164 	if (arc.c <= arc.c_min)
1165 		return;
1166 
1167 	mutex_enter(&arc_reclaim_lock);
1168 
1169 #ifdef _KERNEL
1170 	to_free = MAX(arc.c >> arc_kmem_reclaim_shift, ptob(needfree));
1171 #else
1172 	to_free = arc.c >> arc_kmem_reclaim_shift;
1173 #endif
1174 	if (arc.c > to_free)
1175 		atomic_add_64(&arc.c, -to_free);
1176 	else
1177 		arc.c = arc.c_min;
1178 
1179 	atomic_add_64(&arc.p, -(arc.p >> arc_kmem_reclaim_shift));
1180 	if (arc.c > arc.size)
1181 		arc.c = arc.size;
1182 	if (arc.c < arc.c_min)
1183 		arc.c = arc.c_min;
1184 	if (arc.p > arc.c)
1185 		arc.p = (arc.c >> 1);
1186 	ASSERT((int64_t)arc.p >= 0);
1187 
1188 	arc_adjust();
1189 
1190 	mutex_exit(&arc_reclaim_lock);
1191 }
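/*
 * With the default arc_kmem_reclaim_shift of 5, each reclaim pass
 * releases at least 1/32nd of the current target size: for example,
 * with arc.c at 1GB, to_free is at least 32MB (more if needfree
 * indicates the pageout subsystem wants additional pages).
 */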
1192 
1193 static int
1194 arc_reclaim_needed(void)
1195 {
1196 	uint64_t extra;
1197 
1198 #ifdef _KERNEL
1199 
1200 	if (needfree)
1201 		return (1);
1202 
1203 	/*
1204 	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1205 	 */
1206 	extra = desfree;
1207 
1208 	/*
1209 	 * check that we're out of range of the pageout scanner.  It starts to
1210 	 * schedule paging if freemem is less than lotsfree plus needfree.
1211 	 * lotsfree is the high-water mark for pageout, and needfree is the
1212 	 * number of needed free pages.  We add extra pages here to make sure
1213 	 * the scanner doesn't start up while we're freeing memory.
1214 	 */
1215 	if (freemem < lotsfree + needfree + extra)
1216 		return (1);
1217 
1218 	/*
1219 	 * check to make sure that swapfs has enough space so that anon
1220 	 * reservations can still succeed. anon_resvmem() checks that
1221 	 * availrmem is greater than swapfs_minfree plus the number of reserved
1222 	 * swap pages.  We also add a bit of extra here just to prevent
1223 	 * circumstances from getting really dire.
1224 	 */
1225 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1226 		return (1);
1227 
1228 #if defined(__i386)
1229 	/*
1230 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1231 	 * kernel heap space before we ever run out of available physical
1232 	 * memory.  Most checks of the size of the heap_area compare against
1233 	 * tune.t_minarmem, which is the minimum available real memory that we
1234 	 * can have in the system.  However, this is generally fixed at 25 pages
1235 	 * which is so low that it's useless.  In this comparison, we seek to
1236 	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1237 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
1238 	 * free)
1239 	 */
1240 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1241 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1242 		return (1);
1243 #endif
1244 
1245 #else
1246 	if (spa_get_random(100) == 0)
1247 		return (1);
1248 #endif
1249 	return (0);
1250 }
1251 
1252 static void
1253 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1254 {
1255 	size_t			i;
1256 	kmem_cache_t		*prev_cache = NULL;
1257 	extern kmem_cache_t	*zio_buf_cache[];
1258 
1259 #ifdef _KERNEL
1260 	/*
1261 	 * First purge some DNLC entries, in case the DNLC is using
1262 	 * up too much memory.
1263 	 */
1264 	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
1265 
1266 #if defined(__i386)
1267 	/*
1268 	 * Reclaim unused memory from all kmem caches.
1269 	 */
1270 	kmem_reap();
1271 #endif
1272 #endif
1273 
1274 	/*
1275 	 * An aggressive reclamation will shrink the cache size as well as
1276 	 * reap free buffers from the arc kmem caches.
1277 	 */
1278 	if (strat == ARC_RECLAIM_AGGR)
1279 		arc_kmem_reclaim();
1280 
1281 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1282 		if (zio_buf_cache[i] != prev_cache) {
1283 			prev_cache = zio_buf_cache[i];
1284 			kmem_cache_reap_now(zio_buf_cache[i]);
1285 		}
1286 	}
1287 	kmem_cache_reap_now(buf_cache);
1288 	kmem_cache_reap_now(hdr_cache);
1289 }
1290 
1291 static void
1292 arc_reclaim_thread(void)
1293 {
1294 	clock_t			growtime = 0;
1295 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1296 	callb_cpr_t		cpr;
1297 
1298 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1299 
1300 	mutex_enter(&arc_reclaim_thr_lock);
1301 	while (arc_thread_exit == 0) {
1302 		if (arc_reclaim_needed()) {
1303 
1304 			if (arc.no_grow) {
1305 				if (last_reclaim == ARC_RECLAIM_CONS) {
1306 					last_reclaim = ARC_RECLAIM_AGGR;
1307 				} else {
1308 					last_reclaim = ARC_RECLAIM_CONS;
1309 				}
1310 			} else {
1311 				arc.no_grow = TRUE;
1312 				last_reclaim = ARC_RECLAIM_AGGR;
1313 				membar_producer();
1314 			}
1315 
1316 			/* reset the growth delay for every reclaim */
1317 			growtime = lbolt + (arc_grow_retry * hz);
1318 			ASSERT(growtime > 0);
1319 
1320 			arc_kmem_reap_now(last_reclaim);
1321 
1322 		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
1323 			arc.no_grow = FALSE;
1324 		}
1325 
1326 		if (arc_eviction_list != NULL)
1327 			arc_do_user_evicts();
1328 
1329 		/* block until needed, or one second, whichever is shorter */
1330 		CALLB_CPR_SAFE_BEGIN(&cpr);
1331 		(void) cv_timedwait(&arc_reclaim_thr_cv,
1332 		    &arc_reclaim_thr_lock, (lbolt + hz));
1333 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1334 	}
1335 
1336 	arc_thread_exit = 0;
1337 	cv_broadcast(&arc_reclaim_thr_cv);
1338 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1339 	thread_exit();
1340 }
1341 
1342 /*
1343  * Adapt arc info given the number of bytes we are trying to add and
1344  * the state that we are coming from.  This function is only called
1345  * when we are adding new content to the cache.
1346  */
1347 static void
1348 arc_adapt(int bytes, arc_state_t *state)
1349 {
1350 	int mult;
1351 
1352 	ASSERT(bytes > 0);
1353 	/*
1354 	 * Adapt the target size of the MRU list:
1355 	 *	- if we just hit in the MRU ghost list, then increase
1356 	 *	  the target size of the MRU list.
1357 	 *	- if we just hit in the MFU ghost list, then increase
1358 	 *	  the target size of the MFU list by decreasing the
1359 	 *	  target size of the MRU list.
1360 	 */
1361 	if (state == arc.mru_ghost) {
1362 		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
1363 		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));
1364 
1365 		arc.p = MIN(arc.c, arc.p + bytes * mult);
1366 	} else if (state == arc.mfu_ghost) {
1367 		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
1368 		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));
1369 
1370 		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
1371 	}
1372 	ASSERT((int64_t)arc.p >= 0);
1373 
1374 	if (arc_reclaim_needed()) {
1375 		cv_signal(&arc_reclaim_thr_cv);
1376 		return;
1377 	}
1378 
1379 	if (arc.no_grow)
1380 		return;
1381 
1382 	if (arc.c >= arc.c_max)
1383 		return;
1384 
1385 	/*
1386 	 * If we're within (2 * maxblocksize) bytes of the target
1387 	 * cache size, increment the target cache size
1388 	 */
1389 	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
1390 		atomic_add_64(&arc.c, (int64_t)bytes);
1391 		if (arc.c > arc.c_max)
1392 			arc.c = arc.c_max;
1393 		else if (state == arc.anon)
1394 			atomic_add_64(&arc.p, (int64_t)bytes);
1395 		if (arc.p > arc.c)
1396 			arc.p = arc.c;
1397 	}
1398 	ASSERT((int64_t)arc.p >= 0);
1399 }
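/*
 * For example, a hit in the MRU ghost list while the MFU ghost list is
 * four times its size yields mult == 4, so arc.p grows by 4 * bytes
 * (capped at arc.c); a hit in the MFU ghost list under the mirror-image
 * conditions shrinks arc.p by the same amount (floored at zero).
 */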
1400 
1401 /*
1402  * Check if the cache has reached its limits and eviction is required
1403  * prior to insert.
1404  */
1405 static int
1406 arc_evict_needed()
1407 {
1408 	if (arc_reclaim_needed())
1409 		return (1);
1410 
1411 	return (arc.size > arc.c);
1412 }
1413 
1414 /*
1415  * The buffer, supplied as the first argument, needs a data block.
1416  * So, if we are at cache max, determine which cache should be victimized.
1417  * We have the following cases:
1418  *
1419  * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
1420  * In this situation if we're out of space, but the resident size of the MFU is
1421  * under the limit, victimize the MFU cache to satisfy this insertion request.
1422  *
1423  * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
1424  * Here, we've used up all of the available space for the MRU, so we need to
1425  * evict from our own cache instead.  Evict from the set of resident MRU
1426  * entries.
1427  *
1428  * 3. Insert for MFU (c - p) > sizeof(arc.mfu) ->
1429  * c minus p represents the MFU space in the cache, since p is the size of the
1430  * cache that is dedicated to the MRU.  In this situation there's still space on
1431  * the MFU side, so the MRU side needs to be victimized.
1432  *
1433  * 4. Insert for MFU (c - p) < sizeof(arc.mfu) ->
1434  * MFU's resident set is consuming more space than it has been allotted.  In
1435  * this situation, we must victimize our own cache, the MFU, for this insertion.
1436  */
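/*
 * To illustrate the cases above: with arc.p at 600MB and
 * arc.anon->size + arc.mru->size at 400MB, an MRU insert falls into
 * case 1 and steals space from the MFU; once anon + mru grows past the
 * 600MB target, further MRU inserts fall into case 2 and evict from
 * the MRU itself.
 */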
1437 static void
1438 arc_get_data_buf(arc_buf_t *buf)
1439 {
1440 	arc_state_t	*state = buf->b_hdr->b_state;
1441 	uint64_t	size = buf->b_hdr->b_size;
1442 
1443 	arc_adapt(size, state);
1444 
1445 	/*
1446 	 * We have not yet reached cache maximum size,
1447 	 * just allocate a new buffer.
1448 	 */
1449 	if (!arc_evict_needed()) {
1450 		buf->b_data = zio_buf_alloc(size);
1451 		atomic_add_64(&arc.size, size);
1452 		goto out;
1453 	}
1454 
1455 	/*
1456 	 * If we are prefetching from the mfu ghost list, this buffer
1457 	 * will end up on the mru list; so steal space from there.
1458 	 */
1459 	if (state == arc.mfu_ghost)
1460 		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu;
1461 	else if (state == arc.mru_ghost)
1462 		state = arc.mru;
1463 
1464 	if (state == arc.mru || state == arc.anon) {
1465 		uint64_t mru_used = arc.anon->size + arc.mru->size;
1466 		state = (arc.p > mru_used) ? arc.mfu : arc.mru;
1467 	} else {
1468 		/* MFU cases */
1469 		uint64_t mfu_space = arc.c - arc.p;
1470 		state =  (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu;
1471 	}
1472 	if ((buf->b_data = arc_evict(state, size, TRUE)) == NULL) {
1473 		(void) arc_evict(state, size, FALSE);
1474 		buf->b_data = zio_buf_alloc(size);
1475 		atomic_add_64(&arc.size, size);
1476 		atomic_add_64(&arc.recycle_miss, 1);
1477 		if (arc.size > arc.c)
1478 			arc_adjust();
1479 	}
1480 	ASSERT(buf->b_data != NULL);
1481 out:
1482 	/*
1483 	 * Update the state size.  Note that ghost states have a
1484 	 * "ghost size" and so don't need to be updated.
1485 	 */
1486 	if (!GHOST_STATE(buf->b_hdr->b_state)) {
1487 		arc_buf_hdr_t *hdr = buf->b_hdr;
1488 
1489 		atomic_add_64(&hdr->b_state->size, size);
1490 		if (list_link_active(&hdr->b_arc_node)) {
1491 			ASSERT(refcount_is_zero(&hdr->b_refcnt));
1492 			atomic_add_64(&hdr->b_state->lsize, size);
1493 		}
1494 	}
1495 }
1496 
1497 /*
1498  * This routine is called whenever a buffer is accessed.
1499  * NOTE: the hash lock must be held by the caller and is not dropped here.
1500  */
1501 static void
1502 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1503 {
1504 	ASSERT(MUTEX_HELD(hash_lock));
1505 
1506 	if (buf->b_state == arc.anon) {
1507 		/*
1508 		 * This buffer is not in the cache, and does not
1509 		 * appear in our "ghost" list.  Add the new buffer
1510 		 * to the MRU state.
1511 		 */
1512 
1513 		ASSERT(buf->b_arc_access == 0);
1514 		buf->b_arc_access = lbolt;
1515 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1516 		arc_change_state(arc.mru, buf, hash_lock);
1517 
1518 	} else if (buf->b_state == arc.mru) {
1519 		/*
1520 		 * If this buffer is here because of a prefetch, then either:
1521 		 * - clear the flag if this is a "referencing" read
1522 		 *   (any subsequent access will bump this into the MFU state).
1523 		 * or
1524 		 * - move the buffer to the head of the list if this is
1525 		 *   another prefetch (to make it less likely to be evicted).
1526 		 */
1527 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
1528 			if (refcount_count(&buf->b_refcnt) == 0) {
1529 				ASSERT(list_link_active(&buf->b_arc_node));
1530 				mutex_enter(&arc.mru->mtx);
1531 				list_remove(&arc.mru->list, buf);
1532 				list_insert_head(&arc.mru->list, buf);
1533 				mutex_exit(&arc.mru->mtx);
1534 			} else {
1535 				buf->b_flags &= ~ARC_PREFETCH;
1536 				atomic_add_64(&arc.mru->hits, 1);
1537 			}
1538 			buf->b_arc_access = lbolt;
1539 			return;
1540 		}
1541 
1542 		/*
1543 		 * This buffer has been "accessed" only once so far,
1544 		 * but it is still in the cache. Move it to the MFU
1545 		 * state.
1546 		 */
1547 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
1548 			/*
1549 			 * At least ARC_MINTIME (about 62ms) has passed since we
1550 			 * instantiated this buffer.  Move it to the
1551 			 * most frequently used state.
1552 			 */
1553 			buf->b_arc_access = lbolt;
1554 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1555 			arc_change_state(arc.mfu, buf, hash_lock);
1556 		}
1557 		atomic_add_64(&arc.mru->hits, 1);
1558 	} else if (buf->b_state == arc.mru_ghost) {
1559 		arc_state_t	*new_state;
1560 		/*
1561 		 * This buffer has been "accessed" recently, but
1562 		 * was evicted from the cache.  Move it to the
1563 		 * MFU state.
1564 		 */
1565 
1566 		if (buf->b_flags & ARC_PREFETCH) {
1567 			new_state = arc.mru;
1568 			if (refcount_count(&buf->b_refcnt) > 0)
1569 				buf->b_flags &= ~ARC_PREFETCH;
1570 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1571 		} else {
1572 			new_state = arc.mfu;
1573 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1574 		}
1575 
1576 		buf->b_arc_access = lbolt;
1577 		arc_change_state(new_state, buf, hash_lock);
1578 
1579 		atomic_add_64(&arc.mru_ghost->hits, 1);
1580 	} else if (buf->b_state == arc.mfu) {
1581 		/*
1582 		 * This buffer has been accessed more than once and is
1583 		 * still in the cache.  Keep it in the MFU state.
1584 		 *
1585 		 * NOTE: an add_reference() that occurred when we did
1586 		 * the arc_read() will have kicked this off the list.
1587 		 * If it was a prefetch, we will explicitly move it to
1588 		 * the head of the list now.
1589 		 */
1590 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
1591 			ASSERT(refcount_count(&buf->b_refcnt) == 0);
1592 			ASSERT(list_link_active(&buf->b_arc_node));
1593 			mutex_enter(&arc.mfu->mtx);
1594 			list_remove(&arc.mfu->list, buf);
1595 			list_insert_head(&arc.mfu->list, buf);
1596 			mutex_exit(&arc.mfu->mtx);
1597 		}
1598 		atomic_add_64(&arc.mfu->hits, 1);
1599 		buf->b_arc_access = lbolt;
1600 	} else if (buf->b_state == arc.mfu_ghost) {
1601 		arc_state_t	*new_state = arc.mfu;
1602 		/*
1603 		 * This buffer has been accessed more than once but has
1604 		 * been evicted from the cache.  Move it back to the
1605 		 * MFU state.
1606 		 */
1607 
1608 		if (buf->b_flags & ARC_PREFETCH) {
1609 			/*
1610 			 * This is a prefetch access...
1611 			 * move this block back to the MRU state.
1612 			 */
1613 			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
1614 			new_state = arc.mru;
1615 		}
1616 
1617 		buf->b_arc_access = lbolt;
1618 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1619 		arc_change_state(new_state, buf, hash_lock);
1620 
1621 		atomic_add_64(&arc.mfu_ghost->hits, 1);
1622 	} else {
1623 		ASSERT(!"invalid arc state");
1624 	}
1625 }
1626 
1627 /* a generic arc_done_func_t which you can use */
1628 /* ARGSUSED */
1629 void
1630 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1631 {
1632 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
1633 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1634 }
1635 
1636 /* a generic arc_done_func_t which you can use */
1637 void
1638 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1639 {
1640 	arc_buf_t **bufp = arg;
1641 	if (zio && zio->io_error) {
1642 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1643 		*bufp = NULL;
1644 	} else {
1645 		*bufp = buf;
1646 	}
1647 }
1648 
1649 static void
1650 arc_read_done(zio_t *zio)
1651 {
1652 	arc_buf_hdr_t	*hdr, *found;
1653 	arc_buf_t	*buf;
1654 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1655 	kmutex_t	*hash_lock;
1656 	arc_callback_t	*callback_list, *acb;
1657 	int		freeable = FALSE;
1658 
1659 	buf = zio->io_private;
1660 	hdr = buf->b_hdr;
1661 
1662 	/*
1663 	 * The hdr was inserted into the hash table and removed from the lists
1664 	 * prior to starting I/O.  We should find this header, since
1665 	 * it's in the hash table, and it should be legit since it's
1666 	 * not possible to evict it during the I/O.  The only possible
1667 	 * reason for it not to be found is if we were freed during the
1668 	 * read.
1669 	 */
1670 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
1671 		    &hash_lock);
1672 
1673 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
1674 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1675 
1676 	/* byteswap if necessary */
1677 	callback_list = hdr->b_acb;
1678 	ASSERT(callback_list != NULL);
1679 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1680 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1681 
1682 	/* create copies of the data buffer for the callers */
1683 	abuf = buf;
1684 	for (acb = callback_list; acb; acb = acb->acb_next) {
1685 		if (acb->acb_done) {
1686 			if (abuf == NULL)
1687 				abuf = arc_buf_clone(buf);
1688 			acb->acb_buf = abuf;
1689 			abuf = NULL;
1690 		}
1691 	}
1692 	hdr->b_acb = NULL;
1693 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
1694 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
1695 	if (abuf == buf)
1696 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1697 
1698 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1699 
1700 	if (zio->io_error != 0) {
1701 		hdr->b_flags |= ARC_IO_ERROR;
1702 		if (hdr->b_state != arc.anon)
1703 			arc_change_state(arc.anon, hdr, hash_lock);
1704 		if (HDR_IN_HASH_TABLE(hdr))
1705 			buf_hash_remove(hdr);
1706 		freeable = refcount_is_zero(&hdr->b_refcnt);
1707 		/* convert checksum errors into IO errors */
1708 		if (zio->io_error == ECKSUM)
1709 			zio->io_error = EIO;
1710 	}
1711 
1712 	/*
1713 	 * Broadcast before we drop the hash_lock to avoid the possibility
1714 	 * that the hdr (and hence the cv) might be freed before we get to
1715 	 * the cv_broadcast().
1716 	 */
1717 	cv_broadcast(&hdr->b_cv);
1718 
1719 	if (hash_lock) {
1720 		/*
1721 		 * Only call arc_access on anonymous buffers.  This is because
1722 		 * if we've issued an I/O for an evicted buffer, we've already
1723 		 * called arc_access (to prevent any simultaneous readers from
1724 		 * getting confused).
1725 		 */
1726 		if (zio->io_error == 0 && hdr->b_state == arc.anon)
1727 			arc_access(hdr, hash_lock);
1728 		mutex_exit(hash_lock);
1729 	} else {
1730 		/*
1731 		 * This block was freed while we waited for the read to
1732 		 * complete.  It has been removed from the hash table and
1733 		 * moved to the anonymous state (so that it won't show up
1734 		 * in the cache).
1735 		 */
1736 		ASSERT3P(hdr->b_state, ==, arc.anon);
1737 		freeable = refcount_is_zero(&hdr->b_refcnt);
1738 	}
1739 
1740 	/* execute each callback and free its structure */
1741 	while ((acb = callback_list) != NULL) {
1742 		if (acb->acb_done)
1743 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1744 
1745 		if (acb->acb_zio_dummy != NULL) {
1746 			acb->acb_zio_dummy->io_error = zio->io_error;
1747 			zio_nowait(acb->acb_zio_dummy);
1748 		}
1749 
1750 		callback_list = acb->acb_next;
1751 		kmem_free(acb, sizeof (arc_callback_t));
1752 	}
1753 
1754 	if (freeable)
1755 		arc_hdr_destroy(hdr);
1756 }
1757 
1758 /*
1759  * "Read" the block block at the specified DVA (in bp) via the
1760  * cache.  If the block is found in the cache, invoke the provided
1761  * callback immediately and return.  Note that the `zio' parameter
1762  * in the callback will be NULL in this case, since no IO was
1763  * required.  If the block is not in the cache, pass the read request
1764  * on to the spa with a substitute callback function, so that the
1765  * requested block will be added to the cache.
1766  *
1767  * If a read request arrives for a block that has a read in-progress,
1768  * either wait for the in-progress read to complete (and return the
1769  * results); or, if this is a read with a "done" func, add a record
1770  * to the read to invoke the "done" func when the read completes,
1771  * and return; or just return.
1772  *
1773  * arc_read_done() will invoke all the requested "done" functions
1774  * for readers of this block.
1775  */
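/*
 * A synchronous caller might look roughly like the following sketch
 * (the byteswap function, priority, zio flags and bookmark stand in
 * for whatever the caller would normally pass):
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	error = arc_read(NULL, spa, bp, swapfunc, arc_getbuf_func, &abuf,
 *	    priority, flags, &aflags, &zb);
 *	if (error == 0 && abuf != NULL) {
 *		...use abuf->b_data...
 *		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
 *	}
 */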
1776 int
1777 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1778     arc_done_func_t *done, void *private, int priority, int flags,
1779     uint32_t *arc_flags, zbookmark_t *zb)
1780 {
1781 	arc_buf_hdr_t *hdr;
1782 	arc_buf_t *buf;
1783 	kmutex_t *hash_lock;
1784 	zio_t	*rzio;
1785 
1786 top:
1787 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
1788 	if (hdr && hdr->b_datacnt > 0) {
1789 
1790 		*arc_flags |= ARC_CACHED;
1791 
1792 		if (HDR_IO_IN_PROGRESS(hdr)) {
1793 
1794 			if (*arc_flags & ARC_WAIT) {
1795 				cv_wait(&hdr->b_cv, hash_lock);
1796 				mutex_exit(hash_lock);
1797 				goto top;
1798 			}
1799 			ASSERT(*arc_flags & ARC_NOWAIT);
1800 
1801 			if (done) {
1802 				arc_callback_t	*acb = NULL;
1803 
1804 				acb = kmem_zalloc(sizeof (arc_callback_t),
1805 				    KM_SLEEP);
1806 				acb->acb_done = done;
1807 				acb->acb_private = private;
1808 				acb->acb_byteswap = swap;
1809 				if (pio != NULL)
1810 					acb->acb_zio_dummy = zio_null(pio,
1811 					    spa, NULL, NULL, flags);
1812 
1813 				ASSERT(acb->acb_done != NULL);
1814 				acb->acb_next = hdr->b_acb;
1815 				hdr->b_acb = acb;
1816 				add_reference(hdr, hash_lock, private);
1817 				mutex_exit(hash_lock);
1818 				return (0);
1819 			}
1820 			mutex_exit(hash_lock);
1821 			return (0);
1822 		}
1823 
1824 		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
1825 
1826 		if (done) {
1827 			add_reference(hdr, hash_lock, private);
1828 			/*
1829 			 * If this block is already in use, create a new
1830 			 * copy of the data so that we will be guaranteed
1831 			 * that arc_release() will always succeed.
1832 			 */
1833 			buf = hdr->b_buf;
1834 			ASSERT(buf);
1835 			ASSERT(buf->b_data);
1836 			if (HDR_BUF_AVAILABLE(hdr)) {
1837 				ASSERT(buf->b_efunc == NULL);
1838 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
1839 			} else {
1840 				buf = arc_buf_clone(buf);
1841 			}
1842 		} else if (*arc_flags & ARC_PREFETCH &&
1843 		    refcount_count(&hdr->b_refcnt) == 0) {
1844 			hdr->b_flags |= ARC_PREFETCH;
1845 		}
1846 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1847 		arc_access(hdr, hash_lock);
1848 		mutex_exit(hash_lock);
1849 		atomic_add_64(&arc.hits, 1);
1850 		if (done)
1851 			done(NULL, buf, private);
1852 	} else {
1853 		uint64_t size = BP_GET_LSIZE(bp);
1854 		arc_callback_t	*acb;
1855 
1856 		if (hdr == NULL) {
1857 			/* this block is not in the cache */
1858 			arc_buf_hdr_t	*exists;
1859 
1860 			buf = arc_buf_alloc(spa, size, private);
1861 			hdr = buf->b_hdr;
1862 			hdr->b_dva = *BP_IDENTITY(bp);
1863 			hdr->b_birth = bp->blk_birth;
1864 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
1865 			exists = buf_hash_insert(hdr, &hash_lock);
1866 			if (exists) {
1867 				/* somebody beat us to the hash insert */
1868 				mutex_exit(hash_lock);
1869 				bzero(&hdr->b_dva, sizeof (dva_t));
1870 				hdr->b_birth = 0;
1871 				hdr->b_cksum0 = 0;
1872 				(void) arc_buf_remove_ref(buf, private);
1873 				goto top; /* restart the IO request */
1874 			}
1875 			/* if this is a prefetch, we don't have a reference */
1876 			if (*arc_flags & ARC_PREFETCH) {
1877 				(void) remove_reference(hdr, hash_lock,
1878 				    private);
1879 				hdr->b_flags |= ARC_PREFETCH;
1880 			}
1881 			if (BP_GET_LEVEL(bp) > 0)
1882 				hdr->b_flags |= ARC_INDIRECT;
1883 		} else {
1884 			/* this block is in the ghost cache */
1885 			ASSERT(GHOST_STATE(hdr->b_state));
1886 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1887 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
1888 			ASSERT(hdr->b_buf == NULL);
1889 
1890 			/* if this is a prefetch, we don't have a reference */
1891 			if (*arc_flags & ARC_PREFETCH)
1892 				hdr->b_flags |= ARC_PREFETCH;
1893 			else
1894 				add_reference(hdr, hash_lock, private);
1895 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
1896 			buf->b_hdr = hdr;
1897 			buf->b_data = NULL;
1898 			buf->b_efunc = NULL;
1899 			buf->b_private = NULL;
1900 			buf->b_next = NULL;
1901 			hdr->b_buf = buf;
1902 			arc_get_data_buf(buf);
1903 			ASSERT(hdr->b_datacnt == 0);
1904 			hdr->b_datacnt = 1;
1905 
1906 		}
1907 
1908 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
1909 		acb->acb_done = done;
1910 		acb->acb_private = private;
1911 		acb->acb_byteswap = swap;
1912 
1913 		ASSERT(hdr->b_acb == NULL);
1914 		hdr->b_acb = acb;
1915 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
1916 
1917 		/*
1918 		 * If the buffer has been evicted, migrate it to a present state
1919 		 * before issuing the I/O.  Once we drop the hash-table lock,
1920 		 * the header will be marked as I/O in progress and have an
1921 		 * attached buffer.  At this point, anybody who finds this
1922 		 * buffer ought to notice that it's legit but has a pending I/O.
1923 		 */
1924 
1925 		if (GHOST_STATE(hdr->b_state))
1926 			arc_access(hdr, hash_lock);
1927 		mutex_exit(hash_lock);
1928 
1929 		ASSERT3U(hdr->b_size, ==, size);
1930 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
1931 		    zbookmark_t *, zb);
1932 		atomic_add_64(&arc.misses, 1);
1933 
1934 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
1935 		    arc_read_done, buf, priority, flags, zb);
1936 
1937 		if (*arc_flags & ARC_WAIT)
1938 			return (zio_wait(rzio));
1939 
1940 		ASSERT(*arc_flags & ARC_NOWAIT);
1941 		zio_nowait(rzio);
1942 	}
1943 	return (0);
1944 }
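
/*
 * Editor's illustrative sketch (not part of the original source): a
 * hypothetical caller issuing an asynchronous cached read.  The callback
 * name, the private-state pointer, and the priority/flag constants are
 * assumptions for illustration; only arc_read() itself and the
 * ARC_WAIT/ARC_NOWAIT flags come from the code above.  On a cache hit
 * the callback receives a NULL zio.
 *
 *	static void
 *	my_read_done(zio_t *zio, arc_buf_t *buf, void *private)
 *	{
 *		...consume buf->b_data, then drop the reference...
 *	}
 *
 *	uint32_t aflags = ARC_NOWAIT;
 *	(void) arc_read(pio, spa, bp, byteswap_uint64_array,
 *	    my_read_done, my_state, ZIO_PRIORITY_ASYNC_READ,
 *	    ZIO_FLAG_CANFAIL, &aflags, &zb);
 */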
1945 
1946 /*
1947  * arc_read() variant to support pool traversal.  If the block is already
1948  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
1949  * The idea is that we don't want pool traversal filling up memory, but
1950  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
1951  */
1952 int
1953 arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
1954 {
1955 	arc_buf_hdr_t *hdr;
1956 	kmutex_t *hash_mtx;
1957 	int rc = 0;
1958 
1959 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
1960 
1961 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
1962 		arc_buf_t *buf = hdr->b_buf;
1963 
1964 		ASSERT(buf);
1965 		while (buf->b_data == NULL) {
1966 			buf = buf->b_next;
1967 			ASSERT(buf);
1968 		}
1969 		bcopy(buf->b_data, data, hdr->b_size);
1970 	} else {
1971 		rc = ENOENT;
1972 	}
1973 
1974 	if (hash_mtx)
1975 		mutex_exit(hash_mtx);
1976 
1977 	return (rc);
1978 }
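
/*
 * Editor's illustrative sketch (not part of the original source): how a
 * pool-traversal path might use arc_tryread(), falling back to its own
 * read on ENOENT.  The buffer allocation and fallback shown here are
 * hypothetical.
 *
 *	void *data = kmem_alloc(BP_GET_LSIZE(bp), KM_SLEEP);
 *	if (arc_tryread(spa, bp, data) == ENOENT) {
 *		...not cached (or read in progress): issue the I/O directly...
 *	}
 */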
1979 
1980 void
1981 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
1982 {
1983 	ASSERT(buf->b_hdr != NULL);
1984 	ASSERT(buf->b_hdr->b_state != arc.anon);
1985 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
1986 	buf->b_efunc = func;
1987 	buf->b_private = private;
1988 }
1989 
1990 /*
1991  * This is used by the DMU to let the ARC know that a buffer is
1992  * being evicted, so the ARC should clean up.  If this arc buf
1993  * is not yet in the evicted state, it will be put there.
1994  */
1995 int
1996 arc_buf_evict(arc_buf_t *buf)
1997 {
1998 	arc_buf_hdr_t *hdr;
1999 	kmutex_t *hash_lock;
2000 	arc_buf_t **bufp;
2001 
2002 	mutex_enter(&arc_eviction_mtx);
2003 	hdr = buf->b_hdr;
2004 	if (hdr == NULL) {
2005 		/*
2006 		 * We are in arc_do_user_evicts().
2007 		 */
2008 		ASSERT(buf->b_data == NULL);
2009 		mutex_exit(&arc_eviction_mtx);
2010 		return (0);
2011 	}
2012 	hash_lock = HDR_LOCK(hdr);
2013 	mutex_exit(&arc_eviction_mtx);
2014 
2015 	mutex_enter(hash_lock);
2016 
2017 	if (buf->b_data == NULL) {
2018 		/*
2019 		 * We are on the eviction list.
2020 		 */
2021 		mutex_exit(hash_lock);
2022 		mutex_enter(&arc_eviction_mtx);
2023 		if (buf->b_hdr == NULL) {
2024 			/*
2025 			 * We are already in arc_do_user_evicts().
2026 			 */
2027 			mutex_exit(&arc_eviction_mtx);
2028 			return (0);
2029 		} else {
2030 			arc_buf_t copy = *buf; /* structure assignment */
2031 			/*
2032 			 * Process this buffer now
2033 			 * but let arc_do_user_evicts() do the reaping.
2034 			 */
2035 			buf->b_efunc = NULL;
2036 			mutex_exit(&arc_eviction_mtx);
2037 			VERIFY(copy.b_efunc(&copy) == 0);
2038 			return (1);
2039 		}
2040 	}
2041 
2042 	ASSERT(buf->b_hdr == hdr);
2043 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2044 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
2045 
2046 	/*
2047 	 * Pull this buffer off of the hdr
2048 	 */
2049 	bufp = &hdr->b_buf;
2050 	while (*bufp != buf)
2051 		bufp = &(*bufp)->b_next;
2052 	*bufp = buf->b_next;
2053 
2054 	ASSERT(buf->b_data != NULL);
2055 	arc_buf_destroy(buf, FALSE, FALSE);
2056 
2057 	if (hdr->b_datacnt == 0) {
2058 		arc_state_t *old_state = hdr->b_state;
2059 		arc_state_t *evicted_state;
2060 
2061 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
2062 
2063 		evicted_state =
2064 		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
2065 
2066 		mutex_enter(&old_state->mtx);
2067 		mutex_enter(&evicted_state->mtx);
2068 
2069 		arc_change_state(evicted_state, hdr, hash_lock);
2070 		ASSERT(HDR_IN_HASH_TABLE(hdr));
2071 		hdr->b_flags = ARC_IN_HASH_TABLE;
2072 
2073 		mutex_exit(&evicted_state->mtx);
2074 		mutex_exit(&old_state->mtx);
2075 	}
2076 	mutex_exit(hash_lock);
2077 
2078 	VERIFY(buf->b_efunc(buf) == 0);
2079 	buf->b_efunc = NULL;
2080 	buf->b_private = NULL;
2081 	buf->b_hdr = NULL;
2082 	kmem_cache_free(buf_cache, buf);
2083 	return (1);
2084 }
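
/*
 * Editor's illustrative sketch (not part of the original source): the
 * eviction-callback protocol as it appears from the calls above.  A
 * client (e.g. the DMU) registers an evict function on a referenced
 * buffer; when the ARC wants the buffer gone, arc_buf_evict() detaches
 * it and invokes that function, which must return 0.  The function and
 * state names are hypothetical; the one-argument signature is inferred
 * from the b_efunc(buf) calls in this file.
 *
 *	static int
 *	my_evict_func(arc_buf_t *abuf)
 *	{
 *		...drop the client's use of abuf, then return (0)...
 *	}
 *
 *	arc_set_callback(buf, my_evict_func, my_state);
 */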
2085 
2086 /*
2087  * Release this buffer from the cache.  This must be done
2088  * after a read and prior to modifying the buffer contents.
2089  * If the buffer has more than one reference, we must make
2090  * a new hdr for the buffer.
2091  */
2092 void
2093 arc_release(arc_buf_t *buf, void *tag)
2094 {
2095 	arc_buf_hdr_t *hdr = buf->b_hdr;
2096 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2097 
2098 	/* this buffer is not on any list */
2099 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2100 
2101 	if (hdr->b_state == arc.anon) {
2102 		/* this buffer is already released */
2103 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2104 		ASSERT(BUF_EMPTY(hdr));
2105 		ASSERT(buf->b_efunc == NULL);
2106 		return;
2107 	}
2108 
2109 	mutex_enter(hash_lock);
2110 
2111 	/*
2112 	 * Do we have more than one buf?
2113 	 */
2114 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2115 		arc_buf_hdr_t *nhdr;
2116 		arc_buf_t **bufp;
2117 		uint64_t blksz = hdr->b_size;
2118 		spa_t *spa = hdr->b_spa;
2119 
2120 		ASSERT(hdr->b_datacnt > 1);
2121 		/*
2122 		 * Pull the data off of this buf and attach it to
2123 		 * a new anonymous buf.
2124 		 */
2125 		(void) remove_reference(hdr, hash_lock, tag);
2126 		bufp = &hdr->b_buf;
2127 		while (*bufp != buf)
2128 			bufp = &(*bufp)->b_next;
2129 		*bufp = (*bufp)->b_next;
2130 
2131 		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
2132 		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
2133 		if (refcount_is_zero(&hdr->b_refcnt)) {
2134 			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
2135 			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
2136 		}
2137 		hdr->b_datacnt -= 1;
2138 
2139 		mutex_exit(hash_lock);
2140 
2141 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2142 		nhdr->b_size = blksz;
2143 		nhdr->b_spa = spa;
2144 		nhdr->b_buf = buf;
2145 		nhdr->b_state = arc.anon;
2146 		nhdr->b_arc_access = 0;
2147 		nhdr->b_flags = 0;
2148 		nhdr->b_datacnt = 1;
2149 		buf->b_hdr = nhdr;
2150 		buf->b_next = NULL;
2151 		(void) refcount_add(&nhdr->b_refcnt, tag);
2152 		atomic_add_64(&arc.anon->size, blksz);
2153 
2154 		hdr = nhdr;
2155 	} else {
2156 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2157 		ASSERT(!list_link_active(&hdr->b_arc_node));
2158 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2159 		arc_change_state(arc.anon, hdr, hash_lock);
2160 		hdr->b_arc_access = 0;
2161 		mutex_exit(hash_lock);
2162 		bzero(&hdr->b_dva, sizeof (dva_t));
2163 		hdr->b_birth = 0;
2164 		hdr->b_cksum0 = 0;
2165 	}
2166 	buf->b_efunc = NULL;
2167 	buf->b_private = NULL;
2168 }
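
/*
 * Editor's illustrative sketch (not part of the original source): the
 * release-before-modify pattern implied by the comment above.  The write
 * parameters are placeholders taken from the arc_write() signature below;
 * only the ordering (release, then modify, then write) is drawn from
 * this file.
 *
 *	arc_release(buf, tag);		(buf is now anonymous and writable)
 *	...modify buf->b_data...
 *	(void) arc_write(pio, spa, checksum, compress, ncopies, txg, bp,
 *	    buf, done, private, priority, flags, ARC_WAIT, &zb);
 */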
2169 
2170 int
2171 arc_released(arc_buf_t *buf)
2172 {
2173 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
2174 }
2175 
2176 int
2177 arc_has_callback(arc_buf_t *buf)
2178 {
2179 	return (buf->b_efunc != NULL);
2180 }
2181 
2182 #ifdef ZFS_DEBUG
2183 int
2184 arc_referenced(arc_buf_t *buf)
2185 {
2186 	return (refcount_count(&buf->b_hdr->b_refcnt));
2187 }
2188 #endif
2189 
2190 static void
2191 arc_write_done(zio_t *zio)
2192 {
2193 	arc_buf_t *buf;
2194 	arc_buf_hdr_t *hdr;
2195 	arc_callback_t *acb;
2196 
2197 	buf = zio->io_private;
2198 	hdr = buf->b_hdr;
2199 	acb = hdr->b_acb;
2200 	hdr->b_acb = NULL;
2201 	ASSERT(acb != NULL);
2202 
2203 	/* this buffer is on no lists and is not in the hash table */
2204 	ASSERT3P(hdr->b_state, ==, arc.anon);
2205 
2206 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2207 	hdr->b_birth = zio->io_bp->blk_birth;
2208 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
2209 	/*
2210 	 * If the block to be written was all-zero, we may have
2211 	 * compressed it away.  In this case no write was performed
2212 	 * so there will be no dva/birth-date/checksum.  The buffer
2213 	 * must therefore remain anonymous (and uncached).
2214 	 */
2215 	if (!BUF_EMPTY(hdr)) {
2216 		arc_buf_hdr_t *exists;
2217 		kmutex_t *hash_lock;
2218 
2219 		exists = buf_hash_insert(hdr, &hash_lock);
2220 		if (exists) {
2221 			/*
2222 			 * This can only happen if we overwrite for
2223 			 * sync-to-convergence, because we remove
2224 			 * buffers from the hash table when we arc_free().
2225 			 */
2226 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2227 			    BP_IDENTITY(zio->io_bp)));
2228 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2229 			    zio->io_bp->blk_birth);
2230 
2231 			ASSERT(refcount_is_zero(&exists->b_refcnt));
2232 			arc_change_state(arc.anon, exists, hash_lock);
2233 			mutex_exit(hash_lock);
2234 			arc_hdr_destroy(exists);
2235 			exists = buf_hash_insert(hdr, &hash_lock);
2236 			ASSERT3P(exists, ==, NULL);
2237 		}
2238 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2239 		arc_access(hdr, hash_lock);
2240 		mutex_exit(hash_lock);
2241 	} else if (acb->acb_done == NULL) {
2242 		int destroy_hdr;
2243 		/*
2244 		 * This is an anonymous buffer with no user callback,
2245 		 * destroy it if there are no active references.
2246 		 */
2247 		mutex_enter(&arc_eviction_mtx);
2248 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
2249 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2250 		mutex_exit(&arc_eviction_mtx);
2251 		if (destroy_hdr)
2252 			arc_hdr_destroy(hdr);
2253 	} else {
2254 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2255 	}
2256 
2257 	if (acb->acb_done) {
2258 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2259 		acb->acb_done(zio, buf, acb->acb_private);
2260 	}
2261 
2262 	kmem_free(acb, sizeof (arc_callback_t));
2263 }
2264 
2265 int
2266 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2267     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2268     arc_done_func_t *done, void *private, int priority, int flags,
2269     uint32_t arc_flags, zbookmark_t *zb)
2270 {
2271 	arc_buf_hdr_t *hdr = buf->b_hdr;
2272 	arc_callback_t	*acb;
2273 	zio_t	*rzio;
2274 
2275 	/* this is a private buffer - no locking required */
2276 	ASSERT3P(hdr->b_state, ==, arc.anon);
2277 	ASSERT(BUF_EMPTY(hdr));
2278 	ASSERT(!HDR_IO_ERROR(hdr));
2279 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
2280 	ASSERT(hdr->b_acb == 0);
2281 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2282 	acb->acb_done = done;
2283 	acb->acb_private = private;
2284 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
2285 	hdr->b_acb = acb;
2286 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
2287 	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
2288 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);
2289 
2290 	if (arc_flags & ARC_WAIT)
2291 		return (zio_wait(rzio));
2292 
2293 	ASSERT(arc_flags & ARC_NOWAIT);
2294 	zio_nowait(rzio);
2295 
2296 	return (0);
2297 }
2298 
2299 int
2300 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2301     zio_done_func_t *done, void *private, uint32_t arc_flags)
2302 {
2303 	arc_buf_hdr_t *ab;
2304 	kmutex_t *hash_lock;
2305 	zio_t	*zio;
2306 
2307 	/*
2308 	 * If this buffer is in the cache, release it, so it
2309 	 * can be re-used.
2310 	 */
2311 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2312 	if (ab != NULL) {
2313 		/*
2314 		 * The checksum of blocks to free is not always
2315 	 * preserved (e.g. on the deadlist).  However, if it is
2316 		 * nonzero, it should match what we have in the cache.
2317 		 */
2318 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2319 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
2320 		if (ab->b_state != arc.anon)
2321 			arc_change_state(arc.anon, ab, hash_lock);
2322 		if (HDR_IO_IN_PROGRESS(ab)) {
2323 			/*
2324 			 * This should only happen when we prefetch.
2325 			 */
2326 			ASSERT(ab->b_flags & ARC_PREFETCH);
2327 			ASSERT3U(ab->b_datacnt, ==, 1);
2328 			ab->b_flags |= ARC_FREED_IN_READ;
2329 			if (HDR_IN_HASH_TABLE(ab))
2330 				buf_hash_remove(ab);
2331 			ab->b_arc_access = 0;
2332 			bzero(&ab->b_dva, sizeof (dva_t));
2333 			ab->b_birth = 0;
2334 			ab->b_cksum0 = 0;
2335 			ab->b_buf->b_efunc = NULL;
2336 			ab->b_buf->b_private = NULL;
2337 			mutex_exit(hash_lock);
2338 		} else if (refcount_is_zero(&ab->b_refcnt)) {
2339 			mutex_exit(hash_lock);
2340 			arc_hdr_destroy(ab);
2341 			atomic_add_64(&arc.deleted, 1);
2342 		} else {
2343 			/*
2344 			 * We still have an active reference on this
2345 			 * buffer.  This can happen, e.g., from
2346 			 * dbuf_unoverride().
2347 			 */
2348 			ASSERT(!HDR_IN_HASH_TABLE(ab));
2349 			ab->b_arc_access = 0;
2350 			bzero(&ab->b_dva, sizeof (dva_t));
2351 			ab->b_birth = 0;
2352 			ab->b_cksum0 = 0;
2353 			ab->b_buf->b_efunc = NULL;
2354 			ab->b_buf->b_private = NULL;
2355 			mutex_exit(hash_lock);
2356 		}
2357 	}
2358 
2359 	zio = zio_free(pio, spa, txg, bp, done, private);
2360 
2361 	if (arc_flags & ARC_WAIT)
2362 		return (zio_wait(zio));
2363 
2364 	ASSERT(arc_flags & ARC_NOWAIT);
2365 	zio_nowait(zio);
2366 
2367 	return (0);
2368 }
2369 
2370 void
2371 arc_tempreserve_clear(uint64_t tempreserve)
2372 {
2373 	atomic_add_64(&arc_tempreserve, -tempreserve);
2374 	ASSERT((int64_t)arc_tempreserve >= 0);
2375 }
2376 
2377 int
2378 arc_tempreserve_space(uint64_t tempreserve)
2379 {
2380 #ifdef ZFS_DEBUG
2381 	/*
2382 	 * Once in a while, fail for no reason.  Everything should cope.
2383 	 */
2384 	if (spa_get_random(10000) == 0) {
2385 		dprintf("forcing random failure\n");
2386 		return (ERESTART);
2387 	}
2388 #endif
2389 	if (tempreserve > arc.c/4 && !arc.no_grow)
2390 		arc.c = MIN(arc.c_max, tempreserve * 4);
2391 	if (tempreserve > arc.c)
2392 		return (ENOMEM);
2393 
2394 	/*
2395 	 * Throttle writes when the amount of dirty data in the cache
2396 	 * gets too large.  We try to keep the cache less than half full
2397 	 * of dirty blocks so that our sync times don't grow too large.
2398 	 * Note: if two requests come in concurrently, we might let them
2399 	 * both succeed, when one of them should fail.  Not a huge deal.
2400 	 *
2401 	 * XXX The limit should be adjusted dynamically to keep the time
2402 	 * to sync a dataset fixed (around 1-5 seconds?).
2403 	 */
2404 
2405 	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
2406 	    arc_tempreserve + arc.anon->size > arc.c / 4) {
2407 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2408 		    "tempreserve=%lluK arc.c=%lluK\n",
2409 		    arc_tempreserve>>10, arc.anon->lsize>>10,
2410 		    tempreserve>>10, arc.c>>10);
2411 		return (ERESTART);
2412 	}
2413 	atomic_add_64(&arc_tempreserve, tempreserve);
2414 	return (0);
2415 }
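
/*
 * Editor's illustrative sketch (not part of the original source): callers
 * are expected to pair these two routines, reserving before dirtying data
 * and clearing the reservation once the dirty data is accounted for
 * elsewhere.  The retry handling shown is a hypothetical caller's policy.
 *
 *	uint64_t space = ...estimated dirty bytes...;
 *	int err = arc_tempreserve_space(space);
 *	if (err != 0)
 *		return (err);	(ERESTART: back off and retry; ENOMEM: too big)
 *	...dirty the data...
 *	arc_tempreserve_clear(space);
 */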
2416 
2417 void
2418 arc_init(void)
2419 {
2420 	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
2421 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2422 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2423 
2424 	/* Convert seconds to clock ticks */
2425 	arc_min_prefetch_lifespan = 1 * hz;
2426 
2427 	/* Start out with 1/8 of all memory */
2428 	arc.c = physmem * PAGESIZE / 8;
2429 
2430 #ifdef _KERNEL
2431 	/*
2432 	 * On architectures where the physical memory can be larger
2433 	 * than the addressable space (Intel in 32-bit mode), we may
2434 	 * need to limit the cache to 1/8 of VM size.
2435 	 */
2436 	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2437 #endif
2438 
2439 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2440 	arc.c_min = MAX(arc.c / 4, 64<<20);
2441 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2442 	if (arc.c * 8 >= 1<<30)
2443 		arc.c_max = (arc.c * 8) - (1<<30);
2444 	else
2445 		arc.c_max = arc.c_min;
2446 	arc.c_max = MAX(arc.c * 6, arc.c_max);
2447 
2448 	/*
2449 	 * Allow the tunables to override our calculations if they are
2450 	 * reasonable (i.e. over 64MB)
2451 	 */
2452 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
2453 		arc.c_max = zfs_arc_max;
2454 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc.c_max)
2455 		arc.c_min = zfs_arc_min;
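
	/*
	 * Editor's illustrative note (not part of the original source): on
	 * Solaris these tunables would typically be set from /etc/system,
	 * for example (the value is only an example):
	 *
	 *	set zfs:zfs_arc_max = 0x20000000
	 */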
2456 
2457 	arc.c = arc.c_max;
2458 	arc.p = (arc.c >> 1);
2459 
2460 	/* if kmem_flags are set, let's try to use less memory */
2461 	if (kmem_debugging())
2462 		arc.c = arc.c / 2;
2463 	if (arc.c < arc.c_min)
2464 		arc.c = arc.c_min;
2465 
2466 	arc.anon = &ARC_anon;
2467 	arc.mru = &ARC_mru;
2468 	arc.mru_ghost = &ARC_mru_ghost;
2469 	arc.mfu = &ARC_mfu;
2470 	arc.mfu_ghost = &ARC_mfu_ghost;
2471 	arc.size = 0;
2472 
2473 	arc.hits = 0;
2474 	arc.recycle_miss = 0;
2475 	arc.evict_skip = 0;
2476 	arc.mutex_miss = 0;
2477 
2478 	mutex_init(&arc.anon->mtx, NULL, MUTEX_DEFAULT, NULL);
2479 	mutex_init(&arc.mru->mtx, NULL, MUTEX_DEFAULT, NULL);
2480 	mutex_init(&arc.mru_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
2481 	mutex_init(&arc.mfu->mtx, NULL, MUTEX_DEFAULT, NULL);
2482 	mutex_init(&arc.mfu_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
2483 
2484 	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
2485 	    offsetof(arc_buf_hdr_t, b_arc_node));
2486 	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
2487 	    offsetof(arc_buf_hdr_t, b_arc_node));
2488 	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
2489 	    offsetof(arc_buf_hdr_t, b_arc_node));
2490 	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
2491 	    offsetof(arc_buf_hdr_t, b_arc_node));
2492 
2493 	buf_init();
2494 
2495 	arc_thread_exit = 0;
2496 	arc_eviction_list = NULL;
2497 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
2498 	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
2499 
2500 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2501 	    TS_RUN, minclsyspri);
2502 }
2503 
2504 void
2505 arc_fini(void)
2506 {
2507 	mutex_enter(&arc_reclaim_thr_lock);
2508 	arc_thread_exit = 1;
2509 	while (arc_thread_exit != 0)
2510 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2511 	mutex_exit(&arc_reclaim_thr_lock);
2512 
2513 	arc_flush();
2514 
2515 	arc_dead = TRUE;
2516 
2517 	mutex_destroy(&arc_eviction_mtx);
2518 	mutex_destroy(&arc_reclaim_lock);
2519 	mutex_destroy(&arc_reclaim_thr_lock);
2520 	cv_destroy(&arc_reclaim_thr_cv);
2521 
2522 	list_destroy(&arc.mru->list);
2523 	list_destroy(&arc.mru_ghost->list);
2524 	list_destroy(&arc.mfu->list);
2525 	list_destroy(&arc.mfu_ghost->list);
2526 
2527 	mutex_destroy(&arc.anon->mtx);
2528 	mutex_destroy(&arc.mru->mtx);
2529 	mutex_destroy(&arc.mru_ghost->mtx);
2530 	mutex_destroy(&arc.mfu->mtx);
2531 	mutex_destroy(&arc.mfu_ghost->mtx);
2532 
2533 	buf_fini();
2534 }
2535