/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */
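
/*
 * Illustrative example of the lookup pattern described above (a sketch
 * only, not an interface of this file): buf_hash_find() returns with the
 * appropriate hash mutex held, and the caller is responsible for
 * dropping it:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(spa, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... hdr fields may safely be examined here ...
 *		mutex_exit(hash_lock);
 *	}
 */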

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static kmutex_t arc_reclaim_lock;
static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to a buffer, it is linked
 * onto one of the lists in arc.  These are the only buffers that
 * can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 */
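
/*
 * A typical buffer lifecycle, following the states above: a dirty buffer
 * starts out in ARC_anon; once it is written out (and acquires a DVA) it
 * migrates to ARC_mru; a subsequent access promotes it to ARC_mfu; and
 * eviction moves it to the corresponding ghost state, where only the
 * header (not the data) is retained.
 */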

typedef struct arc_state {
	list_t	list;	/* linked list of evictable buffers in state */
	uint64_t lsize;	/* total size of buffers in the linked list */
	uint64_t size;	/* total size of all buffers in this state */
	uint64_t hits;
	kmutex_t mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

static struct arc {
	arc_state_t 	*anon;
	arc_state_t	*mru;
	arc_state_t	*mru_ghost;
	arc_state_t	*mfu;
	arc_state_t	*mfu_ghost;
	uint64_t	size;		/* Actual total arc size */
	uint64_t	p;		/* Target size (in bytes) of mru */
	uint64_t	c;		/* Target size of cache (in bytes) */
	uint64_t	c_min;		/* Minimum target cache size */
	uint64_t	c_max;		/* Maximum target cache size */

	/* performance stats */
	uint64_t	hits;
	uint64_t	misses;
	uint64_t	deleted;
	uint64_t	recycle_miss;
	uint64_t	mutex_miss;
	uint64_t	evict_skip;
	uint64_t	hash_elements;
	uint64_t	hash_elements_max;
	uint64_t	hash_collisions;
	uint64_t	hash_chains;
	uint32_t	hash_chain_max;

	int		no_grow;	/* Don't try to grow cache size */
} arc;

static uint64_t arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	void			*acb_private;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

struct arc_buf_hdr {
	/* immutable */
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	kcondvar_t		b_cv;
	arc_callback_t		*b_acb;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

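/*
 * A "ghost" state tracks headers for recently evicted buffers: the
 * header is retained (for its DVA and size) but carries no data, so
 * b_buf is NULL and b_datacnt is 0 while a buffer is in one of these
 * states.
 */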
#define	GHOST_STATE(state)	\
	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these private
 * flags should never be passed in and should only be set by ARC code.  When
 * adding new public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

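/*
 * Each hash lock is padded out to HT_LOCK_PAD (64) bytes, presumably so
 * that adjacent locks in the ht_locks array below do not share a cache
 * line and ping-pong between CPUs.
 */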
struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}
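
/*
 * Note on the hash above: the DVA bytes are run through the CRC64 table
 * (the same table used for ZFS checksums), and the result is folded with
 * the spa pointer and the block's birth txg to index the hash table.
 */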

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t max, i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		atomic_add_64(&arc.hash_collisions, 1);
		if (i == 1)
			atomic_add_64(&arc.hash_chains, 1);
	}
	while (i > (max = arc.hash_chain_max) &&
	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
		continue;
	}
	atomic_add_64(&arc.hash_elements, 1);
	if (arc.hash_elements > arc.hash_elements_max)
		atomic_add_64(&arc.hash_elements_max, 1);

	return (NULL);
}
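
/*
 * Note that buf_hash_insert() returns with the hash lock held in *lockp
 * in both cases: whether the insert succeeded (NULL return) or an
 * existing element was found, the caller is responsible for dropping
 * the lock.
 */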

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	atomic_add_64(&arc.hash_elements, -1);
	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		atomic_add_64(&arc.hash_chains, -1);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

static int arc_reclaim_needed(void);
void arc_kmem_reclaim(void);

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	if (arc_reclaim_needed())
		arc_kmem_reclaim();
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
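	/*
	 * For example (with 8-byte pointers): on a 4GB machine the loop
	 * below settles on 4GB/64K = 65536 buckets, i.e. a 512KB table.
	 */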
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc.anon)) {
		int delta = ab->b_size * ab->b_datacnt;

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(&ab->b_state->list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(ab->b_state->lsize, >=, delta);
		atomic_add_64(&ab->b_state->lsize, -delta);
		mutex_exit(&ab->b_state->mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;

	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(ab->b_state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (ab->b_state != arc.anon)) {

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&ab->b_state->list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
		mutex_exit(&ab->b_state->mtx);
	}
	return (cnt);
}
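
/*
 * Note that add_reference() and remove_reference() together maintain
 * the invariant that only unreferenced buffers live on the state lists:
 * taking the first reference pulls the buffer off its state list (and
 * out of lsize, the evictable byte count), and dropping the last
 * reference puts it back.
 */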

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int refcnt = refcount_count(&ab->b_refcnt);
	int from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc.anon) {
			int use_mutex = !MUTEX_HELD(&old_state->mtx);

			if (use_mutex)
				mutex_enter(&old_state->mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(old_state->lsize, >=, from_delta);
			atomic_add_64(&old_state->lsize, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->mtx);
		}
		if (new_state != arc.anon) {
			int use_mutex = !MUTEX_HELD(&new_state->mtx);

			if (use_mutex)
				mutex_enter(&new_state->mtx);

			list_insert_head(&new_state->list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(&new_state->lsize, to_delta);
			ASSERT3U(new_state->size + to_delta, >=,
			    new_state->lsize);

			if (use_mutex)
				mutex_exit(&new_state->mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc.anon && old_state != arc.anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->size, >=, from_delta);
		atomic_add_64(&old_state->size, -from_delta);
	}
	ab->b_state = new_state;
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_spa = spa;
	hdr->b_state = arc.anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
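
/*
 * Example usage (illustrative only): allocate an anonymous buffer, fill
 * it, and release it with the same tag.  The tag is an arbitrary pointer
 * used to match up reference holders.
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, SPA_MINBLOCKSIZE, FTAG);
 *	... fill buf->b_data ...
 *	arc_buf_free(buf, FTAG);
 */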

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().  We can do this without holding any
	 * locks because if we happen to obtain the header before it's
	 * cleared, we will find b_data is NULL later.
	 */
	if (hdr == NULL)
		return;

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	atomic_add_64(&arc.hits, 1);
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;

		if (!recycle) {
			zio_buf_free(buf->b_data, size);
			atomic_add_64(&arc.size, -size);
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc.anon);
			ASSERT3U(state->lsize, >=, size);
			atomic_add_64(&state->lsize, -size);
		}
		ASSERT3U(state->size, >=, size);
		atomic_add_64(&state->size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc.anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc.anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc.anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc.anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 */
static void *
arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *steal = NULL;

	ASSERT(state == arc.mru || state == arc.mfu);

	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;

	mutex_enter(&state->mtx);
	mutex_enter(&evicted_state->mtx);

	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		if (recycle && (ab->b_size != bytes || ab->b_datacnt > 1))
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle)
						steal = buf->b_data;
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf, recycle, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf, recycle, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags = ARC_IN_HASH_TABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}
	mutex_exit(&evicted_state->mtx);
	mutex_exit(&state->mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		atomic_add_64(&arc.evict_skip, skipped);
	if (missed)
		atomic_add_64(&arc.mutex_miss, missed);
	return (steal);
}

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->mtx);
	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			arc_change_state(arc.anon, ab, hash_lock);
			mutex_exit(hash_lock);
			atomic_add_64(&arc.deleted, 1);
			bytes_deleted += ab->b_size;
			arc_hdr_destroy(ab);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->mtx);

	if (bufs_skipped) {
		atomic_add_64(&arc.mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

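/*
 * Adjust the cache toward its targets: first evict from the MRU side
 * until anon+mru fits within arc.p, then trim the MRU ghost list so that
 * it plus the top level fits within arc.c, then evict from the MFU side
 * if the total size still exceeds arc.c, and finally trim the MFU ghost
 * list so that everything fits within 2 * arc.c.
 */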
static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over;

	top_sz = arc.anon->size + arc.mru->size;

	if (top_sz > arc.p && arc.mru->lsize > 0) {
		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
		(void) arc_evict(arc.mru, toevict, FALSE);
		top_sz = arc.anon->size + arc.mru->size;
	}

	mru_over = top_sz + arc.mru_ghost->size - arc.c;

	if (mru_over > 0) {
		if (arc.mru_ghost->lsize > 0) {
			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
			arc_evict_ghost(arc.mru_ghost, todelete);
		}
	}

	if ((arc_over = arc.size - arc.c) > 0) {
		int64_t tbl_over;

		if (arc.mfu->lsize > 0) {
			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
			(void) arc_evict(arc.mfu, toevict, FALSE);
		}

		tbl_over = arc.size + arc.mru_ghost->lsize +
		    arc.mfu_ghost->lsize - arc.c*2;

		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
			arc_evict_ghost(arc.mfu_ghost, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(void)
{
	while (list_head(&arc.mru->list))
		(void) arc_evict(arc.mru, -1, FALSE);
	while (list_head(&arc.mfu->list))
		(void) arc_evict(arc.mfu, -1, FALSE);

	arc_evict_ghost(arc.mru_ghost, -1);
	arc_evict_ghost(arc.mfu_ghost, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(arc_eviction_list == NULL);
}

int arc_kmem_reclaim_shift = 5;		/* log2(fraction of arc to reclaim) */
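
/*
 * With the default shift of 5, each reclaim frees at least
 * arc.c / 32 (about 3%) of the current target cache size.
 */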

void
arc_kmem_reclaim(void)
{
	uint64_t to_free;

	/*
	 * We need arc_reclaim_lock because we don't want multiple
	 * threads trying to reclaim concurrently.
	 */

	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().  So we set a flag to prevent
	 * accessing the destroyed mutexes and lists.
	 */
	if (arc_dead)
		return;

	if (arc.c <= arc.c_min)
		return;

	mutex_enter(&arc_reclaim_lock);

#ifdef _KERNEL
	to_free = MAX(arc.c >> arc_kmem_reclaim_shift, ptob(needfree));
#else
	to_free = arc.c >> arc_kmem_reclaim_shift;
#endif
	if (arc.c > to_free)
		atomic_add_64(&arc.c, -to_free);
	else
		arc.c = arc.c_min;

	atomic_add_64(&arc.p, -(arc.p >> arc_kmem_reclaim_shift));
	if (arc.c > arc.size)
		arc.c = arc.size;
	if (arc.c < arc.c_min)
		arc.c = arc.c_min;
	if (arc.p > arc.c)
		arc.p = (arc.c >> 1);
	ASSERT((int64_t)arc.p >= 0);

	arc_adjust();

	mutex_exit(&arc_reclaim_lock);
}

static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL

	if (needfree)
		return (1);

	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed. anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}

static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
	size_t			i;
	kmem_cache_t		*prev_cache = NULL;
	extern kmem_cache_t	*zio_buf_cache[];

#ifdef _KERNEL
	/*
	 * First purge some DNLC entries, in case the DNLC is using
	 * up too much memory.
	 */
	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);

#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_kmem_reclaim();

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}

static void
arc_reclaim_thread(void)
{
	clock_t			growtime = 0;
	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t		cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc.no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc.no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = lbolt + (arc_grow_retry * hz);
			ASSERT(growtime > 0);

			arc_kmem_reap_now(last_reclaim);

		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
			arc.no_grow = FALSE;
		}

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (lbolt + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}

/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
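	/*
	 * For example, if the MFU ghost list is twice the size of the
	 * MRU ghost list, a hit in the MRU ghost list moves arc.p up by
	 * twice the requested bytes (and vice versa), so the rarer ghost
	 * list carries more weight per hit.
	 */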
	if (state == arc.mru_ghost) {
		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));

		arc.p = MIN(arc.c, arc.p + bytes * mult);
	} else if (state == arc.mfu_ghost) {
		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));

		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
	}
	ASSERT((int64_t)arc.p >= 0);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thr_cv);
		return;
	}

	if (arc.no_grow)
		return;

	if (arc.c >= arc.c_max)
		return;

	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
		atomic_add_64(&arc.c, (int64_t)bytes);
		if (arc.c > arc.c_max)
			arc.c = arc.c_max;
		else if (state == arc.anon)
			atomic_add_64(&arc.p, (int64_t)bytes);
		if (arc.p > arc.c)
			arc.p = arc.c;
	}
	ASSERT((int64_t)arc.p >= 0);
}

/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed()
{
	if (arc_reclaim_needed())
		return (1);

	return (arc.size > arc.c);
}

/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc.mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc.mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
static void
arc_get_data_buf(arc_buf_t *buf)
{
	arc_state_t	*state = buf->b_hdr->b_state;
	uint64_t	size = buf->b_hdr->b_size;

	arc_adapt(size, state);

	/*
	 * We have not yet reached cache maximum size,
	 * just allocate a new buffer.
	 */
	if (!arc_evict_needed()) {
		buf->b_data = zio_buf_alloc(size);
		atomic_add_64(&arc.size, size);
		goto out;
	}

	/*
	 * If we are prefetching from the mfu ghost list, this buffer
	 * will end up on the mru list; so steal space from there.
	 */
	if (state == arc.mfu_ghost)
		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu;
	else if (state == arc.mru_ghost)
		state = arc.mru;

	if (state == arc.mru || state == arc.anon) {
		uint64_t mru_used = arc.anon->size + arc.mru->size;
		state = (arc.p > mru_used) ? arc.mfu : arc.mru;
	} else {
		/* MFU cases */
		uint64_t mfu_space = arc.c - arc.p;
		state = (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu;
	}
	if ((buf->b_data = arc_evict(state, size, TRUE)) == NULL) {
		(void) arc_evict(state, size, FALSE);
		buf->b_data = zio_buf_alloc(size);
		atomic_add_64(&arc.size, size);
		atomic_add_64(&arc.recycle_miss, 1);
		if (arc.size > arc.c)
			arc_adjust();
	}
	ASSERT(buf->b_data != NULL);
out:
	/*
	 * Update the state size.  Note that ghost states have a
	 * "ghost size" and so don't need to be updated.
	 */
	if (!GHOST_STATE(buf->b_hdr->b_state)) {
		arc_buf_hdr_t *hdr = buf->b_hdr;

		atomic_add_64(&hdr->b_state->size, size);
		if (list_link_active(&hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&hdr->b_refcnt));
			atomic_add_64(&hdr->b_state->lsize, size);
		}
	}
}

/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if (buf->b_state == arc.anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list.  Add the new buffer
		 * to the MRU state.
		 */

		ASSERT(buf->b_arc_access == 0);
		buf->b_arc_access = lbolt;
		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		arc_change_state(arc.mru, buf, hash_lock);

	} else if (buf->b_state == arc.mru) {
		/*
		 * If this buffer is here because of a prefetch, then either:
		 * - clear the flag if this is a "referencing" read
		 *   (any subsequent access will bump this into the MFU state).
		 * or
		 * - move the buffer to the head of the list if this is
		 *   another prefetch (to make it less likely to be evicted).
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			if (refcount_count(&buf->b_refcnt) == 0) {
				ASSERT(list_link_active(&buf->b_arc_node));
				mutex_enter(&arc.mru->mtx);
				list_remove(&arc.mru->list, buf);
				list_insert_head(&arc.mru->list, buf);
				mutex_exit(&arc.mru->mtx);
			} else {
				buf->b_flags &= ~ARC_PREFETCH;
				atomic_add_64(&arc.mru->hits, 1);
			}
			buf->b_arc_access = lbolt;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache. Move it to the MFU
		 * state.
		 */
		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
			/*
			 * More than 62 ms (ARC_MINTIME) have passed since we
			 * instantiated this buffer.  Move it to the
			 * most frequently used state.
			 */
			buf->b_arc_access = lbolt;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
			arc_change_state(arc.mfu, buf, hash_lock);
		}
		atomic_add_64(&arc.mru->hits, 1);
	} else if (buf->b_state == arc.mru_ghost) {
		arc_state_t	*new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			new_state = arc.mru;
			if (refcount_count(&buf->b_refcnt) > 0)
				buf->b_flags &= ~ARC_PREFETCH;
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		} else {
			new_state = arc.mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		}

		buf->b_arc_access = lbolt;
		arc_change_state(new_state, buf, hash_lock);

		atomic_add_64(&arc.mru_ghost->hits, 1);
	} else if (buf->b_state == arc.mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			ASSERT(refcount_count(&buf->b_refcnt) == 0);
			ASSERT(list_link_active(&buf->b_arc_node));
			mutex_enter(&arc.mfu->mtx);
			list_remove(&arc.mfu->list, buf);
			list_insert_head(&arc.mfu->list, buf);
			mutex_exit(&arc.mfu->mtx);
		}
		atomic_add_64(&arc.mfu->hits, 1);
		buf->b_arc_access = lbolt;
	} else if (buf->b_state == arc.mfu_ghost) {
		arc_state_t	*new_state = arc.mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.
			 */
			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
			new_state = arc.mru;
		}

		buf->b_arc_access = lbolt;
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(new_state, buf, hash_lock);

		atomic_add_64(&arc.mfu_ghost->hits, 1);
	} else {
		ASSERT(!"invalid arc state");
	}
}

/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
}

/* a generic arc_done_func_t which you can use */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
		*bufp = NULL;
	} else {
		*bufp = buf;
	}
}

static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t	*hdr, *found;
	arc_buf_t	*buf;
	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
	kmutex_t	*hash_lock;
	arc_callback_t	*callback_list, *acb;
	int		freeable = FALSE;

	buf = zio->io_private;
	hdr = buf->b_hdr;

	/*
	 * The hdr was inserted into the hash table and removed from lists
	 * prior to starting I/O.  We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O.  The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
		    &hash_lock);

	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));

	/* byteswap if necessary */
	callback_list = hdr->b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
		callback_list->acb_byteswap(buf->b_data, hdr->b_size);

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL)
				abuf = arc_buf_clone(buf);
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_acb = NULL;
	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf)
		hdr->b_flags |= ARC_BUF_AVAILABLE;

	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_IO_ERROR;
		if (hdr->b_state != arc.anon)
			arc_change_state(arc.anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_refcnt);
		/* convert checksum errors into IO errors */
		if (zio->io_error == ECKSUM)
			zio->io_error = EIO;
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_cv);

	if (hash_lock) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		if (zio->io_error == 0 && hdr->b_state == arc.anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_state, ==, arc.anon);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}

/*
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.  Note that the `zio' parameter
 * in the callback will be NULL in this case, since no IO was
 * required.  If the block is not in the cache pass the read request
 * on to the spa with a substitute callback function, so that the
 * requested block will be added to the cache.
 *
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results); or, if this is a read with a "done" func, add a record
 * to the read to invoke the "done" func when the read completes,
 * and return; or just return.
 *
 * arc_read_done() will invoke all the requested "done" functions
 * for readers of this block.
 */
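
/*
 * Illustrative sketch of a synchronous cached read using the generic
 * arc_getbuf_func() callback defined above (the byteswap function, the
 * zbookmark zb, and the zio priority/flags are the caller's to choose;
 * ZIO_PRIORITY_SYNC_READ and ZIO_FLAG_CANFAIL come from zio.h):
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *
 *	(void) arc_read(NULL, spa, bp, byteswap, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
 *	if (abuf != NULL) {
 *		... use abuf->b_data ...
 *		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
 *	}
 */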
int
arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
    arc_done_func_t *done, void *private, int priority, int flags,
    uint32_t *arc_flags, zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	zio_t	*rzio;

top:
	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (hdr && hdr->b_datacnt > 0) {

		*arc_flags |= ARC_CACHED;

		if (HDR_IO_IN_PROGRESS(hdr)) {

			if (*arc_flags & ARC_WAIT) {
				cv_wait(&hdr->b_cv, hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			ASSERT(*arc_flags & ARC_NOWAIT);

			if (done) {
				arc_callback_t	*acb = NULL;

				acb = kmem_zalloc(sizeof (arc_callback_t),
				    KM_SLEEP);
				acb->acb_done = done;
				acb->acb_private = private;
				acb->acb_byteswap = swap;
				if (pio != NULL)
					acb->acb_zio_dummy = zio_null(pio,
					    spa, NULL, NULL, flags);

				ASSERT(acb->acb_done != NULL);
				acb->acb_next = hdr->b_acb;
				hdr->b_acb = acb;
				add_reference(hdr, hash_lock, private);
				mutex_exit(hash_lock);
				return (0);
			}
			mutex_exit(hash_lock);
			return (0);
		}

		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);

		if (done) {
			add_reference(hdr, hash_lock, private);
			/*
			 * If this block is already in use, create a new
			 * copy of the data so that we will be guaranteed
			 * that arc_release() will always succeed.
			 */
			buf = hdr->b_buf;
			ASSERT(buf);
			ASSERT(buf->b_data);
			if (HDR_BUF_AVAILABLE(hdr)) {
				ASSERT(buf->b_efunc == NULL);
				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
			} else {
				buf = arc_buf_clone(buf);
			}
		} else if (*arc_flags & ARC_PREFETCH &&
		    refcount_count(&hdr->b_refcnt) == 0) {
			hdr->b_flags |= ARC_PREFETCH;
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
		atomic_add_64(&arc.hits, 1);
		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t size = BP_GET_LSIZE(bp);
		arc_callback_t	*acb;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t	*exists;

			buf = arc_buf_alloc(spa, size, private);
			hdr = buf->b_hdr;
			hdr->b_dva = *BP_IDENTITY(bp);
			hdr->b_birth = bp->blk_birth;
			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
			exists = buf_hash_insert(hdr, &hash_lock);
			if (exists) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				bzero(&hdr->b_dva, sizeof (dva_t));
				hdr->b_birth = 0;
				hdr->b_cksum0 = 0;
				(void) arc_buf_remove_ref(buf, private);
				goto top; /* restart the IO request */
			}
			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH) {
				(void) remove_reference(hdr, hash_lock,
				    private);
				hdr->b_flags |= ARC_PREFETCH;
			}
			if (BP_GET_LEVEL(bp) > 0)
				hdr->b_flags |= ARC_INDIRECT;
		} else {
			/* this block is in the ghost cache */
			ASSERT(GHOST_STATE(hdr->b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
			ASSERT(hdr->b_buf == NULL);

			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH)
				hdr->b_flags |= ARC_PREFETCH;
1887 			else
1888 				add_reference(hdr, hash_lock, private);
1889 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
1890 			buf->b_hdr = hdr;
1891 			buf->b_data = NULL;
1892 			buf->b_efunc = NULL;
1893 			buf->b_private = NULL;
1894 			buf->b_next = NULL;
1895 			hdr->b_buf = buf;
1896 			arc_get_data_buf(buf);
1897 			ASSERT(hdr->b_datacnt == 0);
1898 			hdr->b_datacnt = 1;
1899 
1900 		}
1901 
1902 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
1903 		acb->acb_done = done;
1904 		acb->acb_private = private;
1905 		acb->acb_byteswap = swap;
1906 
1907 		ASSERT(hdr->b_acb == NULL);
1908 		hdr->b_acb = acb;
1909 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
1910 
1911 		/*
1912 		 * If the buffer has been evicted, migrate it to a present state
1913 		 * before issuing the I/O.  Once we drop the hash-table lock,
1914 		 * the header will be marked as I/O in progress and have an
1915 		 * attached buffer.  At this point, anybody who finds this
1916 		 * buffer ought to notice that it's legit but has a pending I/O.
1917 		 */
1918 
1919 		if (GHOST_STATE(hdr->b_state))
1920 			arc_access(hdr, hash_lock);
1921 		mutex_exit(hash_lock);
1922 
1923 		ASSERT3U(hdr->b_size, ==, size);
1924 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
1925 		    zbookmark_t *, zb);
1926 		atomic_add_64(&arc.misses, 1);
1927 
1928 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
1929 		    arc_read_done, buf, priority, flags, zb);
1930 
1931 		if (*arc_flags & ARC_WAIT)
1932 			return (zio_wait(rzio));
1933 
1934 		ASSERT(*arc_flags & ARC_NOWAIT);
1935 		zio_nowait(rzio);
1936 	}
1937 	return (0);
1938 }
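
/*
 * Illustrative sketch of a synchronous arc_read() caller.  The
 * callback my_done_func() and its argument my_arg are hypothetical;
 * byteswap_uint64_array is one of the standard DMU byteswap routines,
 * and zb is assumed to be a bookmark the caller has already filled in.
 *
 *	uint32_t aflags = ARC_WAIT;
 *
 *	error = arc_read(NULL, spa, bp, byteswap_uint64_array,
 *	    my_done_func, my_arg, ZIO_PRIORITY_SYNC_READ,
 *	    ZIO_FLAG_CANFAIL, &aflags, &zb);
 *
 * With ARC_WAIT the call blocks until any required I/O completes and
 * returns its error code; with ARC_NOWAIT it returns immediately and
 * my_done_func() runs later from arc_read_done().  On a hit, aflags
 * comes back with ARC_CACHED set and no I/O is issued.
 */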
1939 
1940 /*
1941  * arc_read() variant to support pool traversal.  If the block is already
1942  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
1943  * The idea is that we don't want pool traversal filling up memory, but
1944  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
1945  */
1946 int
1947 arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
1948 {
1949 	arc_buf_hdr_t *hdr;
1950 	kmutex_t *hash_mtx;
1951 	int rc = 0;
1952 
1953 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
1954 
1955 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
1956 		arc_buf_t *buf = hdr->b_buf;
1957 
1958 		ASSERT(buf);
1959 		while (buf->b_data == NULL) {
1960 			buf = buf->b_next;
1961 			ASSERT(buf);
1962 		}
1963 		bcopy(buf->b_data, data, hdr->b_size);
1964 	} else {
1965 		rc = ENOENT;
1966 	}
1967 
1968 	if (hash_mtx)
1969 		mutex_exit(hash_mtx);
1970 
1971 	return (rc);
1972 }
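
/*
 * Illustrative sketch of the traversal pattern described above; the
 * surrounding variables (spa, bp, zb) are assumed to be the caller's.
 *
 *	void *data = zio_buf_alloc(BP_GET_LSIZE(bp));
 *
 *	if (arc_tryread(spa, bp, data) == ENOENT) {
 *		(void) zio_wait(zio_read(NULL, spa, bp, data,
 *		    BP_GET_LSIZE(bp), NULL, NULL,
 *		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
 *	}
 *
 * On a hit the data is copied straight out of the ARC; on a miss the
 * traversal pays for its own I/O and nothing new is cached.
 */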
1973 
1974 void
1975 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
1976 {
1977 	ASSERT(buf->b_hdr != NULL);
1978 	ASSERT(buf->b_hdr->b_state != arc.anon);
1979 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
1980 	buf->b_efunc = func;
1981 	buf->b_private = private;
1982 }
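
/*
 * Illustrative sketch of the eviction-callback contract: a consumer
 * registers a function and private pointer here, and must return 0
 * from the function once its own state is torn down (arc_buf_evict()
 * VERIFYs this).  The DMU's dbuf layer is the real consumer; the
 * names below are hypothetical.
 *
 *	static int
 *	my_evict_func(arc_buf_t *buf)
 *	{
 *		my_state_t *ms = buf->b_private;
 *
 *		my_teardown(ms);
 *		return (0);
 *	}
 *
 *	arc_set_callback(buf, my_evict_func, ms);
 */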
1983 
1984 /*
1985  * This is used by the DMU to let the ARC know that a buffer is
1986  * being evicted, so the ARC should clean up.  If this arc buf
1987  * is not yet in the evicted state, it will be put there.
1988  */
1989 int
1990 arc_buf_evict(arc_buf_t *buf)
1991 {
1992 	arc_buf_hdr_t *hdr = buf->b_hdr;
1993 	kmutex_t *hash_lock;
1994 	arc_buf_t **bufp;
1995 
1996 	if (hdr == NULL) {
1997 		/*
1998 		 * We are in arc_do_user_evicts().
1999 		 */
2000 		ASSERT(buf->b_data == NULL);
2001 		return (0);
2002 	}
2003 
2004 	hash_lock = HDR_LOCK(hdr);
2005 	mutex_enter(hash_lock);
2006 
2007 	if (buf->b_data == NULL) {
2008 		/*
2009 		 * We are on the eviction list.
2010 		 */
2011 		mutex_exit(hash_lock);
2012 		mutex_enter(&arc_eviction_mtx);
2013 		if (buf->b_hdr == NULL) {
2014 			/*
2015 			 * We are already in arc_do_user_evicts().
2016 			 */
2017 			mutex_exit(&arc_eviction_mtx);
2018 			return (0);
2019 		} else {
2020 			arc_buf_t copy = *buf; /* structure assignment */
2021 			/*
2022 			 * Process this buffer now
2023 			 * but let arc_do_user_evicts() do the reaping.
2024 			 */
2025 			buf->b_efunc = NULL;
2026 			mutex_exit(&arc_eviction_mtx);
2027 			VERIFY(copy.b_efunc(&copy) == 0);
2028 			return (1);
2029 		}
2030 	}
2031 
2032 	ASSERT(buf->b_hdr == hdr);
2033 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2034 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
2035 
2036 	/*
2037 	 * Pull this buffer off of the hdr
2038 	 */
2039 	bufp = &hdr->b_buf;
2040 	while (*bufp != buf)
2041 		bufp = &(*bufp)->b_next;
2042 	*bufp = buf->b_next;
2043 
2044 	ASSERT(buf->b_data != NULL);
2045 	buf->b_hdr = hdr;
2046 	arc_buf_destroy(buf, FALSE, FALSE);
2047 
2048 	if (hdr->b_datacnt == 0) {
2049 		arc_state_t *old_state = hdr->b_state;
2050 		arc_state_t *evicted_state;
2051 
2052 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
2053 
2054 		evicted_state =
2055 		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
2056 
2057 		mutex_enter(&old_state->mtx);
2058 		mutex_enter(&evicted_state->mtx);
2059 
2060 		arc_change_state(evicted_state, hdr, hash_lock);
2061 		ASSERT(HDR_IN_HASH_TABLE(hdr));
2062 		hdr->b_flags = ARC_IN_HASH_TABLE;
2063 
2064 		mutex_exit(&evicted_state->mtx);
2065 		mutex_exit(&old_state->mtx);
2066 	}
2067 	mutex_exit(hash_lock);
2068 
2069 	VERIFY(buf->b_efunc(buf) == 0);
2070 	buf->b_efunc = NULL;
2071 	buf->b_private = NULL;
2072 	buf->b_hdr = NULL;
2073 	kmem_cache_free(buf_cache, buf);
2074 	return (1);
2075 }
2076 
2077 /*
2078  * Release this buffer from the cache.  This must be done
2079  * after a read and prior to modifying the buffer contents.
2080  * If the buffer has more than one reference, we must
2081  * make a new hdr for the buffer.
2082  */
2083 void
2084 arc_release(arc_buf_t *buf, void *tag)
2085 {
2086 	arc_buf_hdr_t *hdr = buf->b_hdr;
2087 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2088 
2089 	/* this buffer is not on any list */
2090 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2091 
2092 	if (hdr->b_state == arc.anon) {
2093 		/* this buffer is already released */
2094 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2095 		ASSERT(BUF_EMPTY(hdr));
2096 		ASSERT(buf->b_efunc == NULL);
2097 		return;
2098 	}
2099 
2100 	mutex_enter(hash_lock);
2101 
2102 	/*
2103 	 * Do we have more than one buf?
2104 	 */
2105 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2106 		arc_buf_hdr_t *nhdr;
2107 		arc_buf_t **bufp;
2108 		uint64_t blksz = hdr->b_size;
2109 		spa_t *spa = hdr->b_spa;
2110 
2111 		ASSERT(hdr->b_datacnt > 1);
2112 		/*
2113 		 * Pull the data off of this buf and attach it to
2114 		 * a new anonymous buf.
2115 		 */
2116 		(void) remove_reference(hdr, hash_lock, tag);
2117 		bufp = &hdr->b_buf;
2118 		while (*bufp != buf)
2119 			bufp = &(*bufp)->b_next;
2120 		*bufp = (*bufp)->b_next;
2121 
2122 		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
2123 		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
2124 		if (refcount_is_zero(&hdr->b_refcnt)) {
2125 			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
2126 			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
2127 		}
2128 		hdr->b_datacnt -= 1;
2129 
2130 		mutex_exit(hash_lock);
2131 
2132 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2133 		nhdr->b_size = blksz;
2134 		nhdr->b_spa = spa;
2135 		nhdr->b_buf = buf;
2136 		nhdr->b_state = arc.anon;
2137 		nhdr->b_arc_access = 0;
2138 		nhdr->b_flags = 0;
2139 		nhdr->b_datacnt = 1;
2140 		buf->b_hdr = nhdr;
2141 		buf->b_next = NULL;
2142 		(void) refcount_add(&nhdr->b_refcnt, tag);
2143 		atomic_add_64(&arc.anon->size, blksz);
2144 
2145 		hdr = nhdr;
2146 	} else {
2147 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2148 		ASSERT(!list_link_active(&hdr->b_arc_node));
2149 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2150 		arc_change_state(arc.anon, hdr, hash_lock);
2151 		hdr->b_arc_access = 0;
2152 		mutex_exit(hash_lock);
2153 		bzero(&hdr->b_dva, sizeof (dva_t));
2154 		hdr->b_birth = 0;
2155 		hdr->b_cksum0 = 0;
2156 	}
2157 	buf->b_efunc = NULL;
2158 	buf->b_private = NULL;
2159 }
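
/*
 * Illustrative sketch of the release-before-modify protocol described
 * above; my_tag, my_done and my_arg are hypothetical.
 *
 *	arc_release(buf, my_tag);	(buf came from an earlier arc_read())
 *	ASSERT(arc_released(buf));
 *	... modify buf->b_data in place ...
 *	error = arc_write(pio, spa, checksum, compress, ncopies, txg,
 *	    bp, buf, my_done, my_arg, priority, flags, ARC_WAIT, zb);
 *
 * Once released, the buffer is anonymous: it can no longer be found
 * via the hash table by other readers, so it is safe to scribble on.
 */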
2160 
2161 int
2162 arc_released(arc_buf_t *buf)
2163 {
2164 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
2165 }
2166 
2167 int
2168 arc_has_callback(arc_buf_t *buf)
2169 {
2170 	return (buf->b_efunc != NULL);
2171 }
2172 
2173 #ifdef ZFS_DEBUG
2174 int
2175 arc_referenced(arc_buf_t *buf)
2176 {
2177 	return (refcount_count(&buf->b_hdr->b_refcnt));
2178 }
2179 #endif
2180 
2181 static void
2182 arc_write_done(zio_t *zio)
2183 {
2184 	arc_buf_t *buf;
2185 	arc_buf_hdr_t *hdr;
2186 	arc_callback_t *acb;
2187 
2188 	buf = zio->io_private;
2189 	hdr = buf->b_hdr;
2190 	acb = hdr->b_acb;
2191 	hdr->b_acb = NULL;
2192 	ASSERT(acb != NULL);
2193 
2194 	/* this buffer is on no lists and is not in the hash table */
2195 	ASSERT3P(hdr->b_state, ==, arc.anon);
2196 
2197 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2198 	hdr->b_birth = zio->io_bp->blk_birth;
2199 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
2200 	/*
2201 	 * If the block to be written was all-zero, we may have
2202 	 * compressed it away.  In this case no write was performed
2203 	 * so there will be no dva/birth-date/checksum.  The buffer
2204  * must therefore remain anonymous (and uncached).
2205 	 */
2206 	if (!BUF_EMPTY(hdr)) {
2207 		arc_buf_hdr_t *exists;
2208 		kmutex_t *hash_lock;
2209 
2210 		exists = buf_hash_insert(hdr, &hash_lock);
2211 		if (exists) {
2212 			/*
2213 			 * This can only happen if we overwrite for
2214 			 * sync-to-convergence, because we remove
2215 			 * buffers from the hash table when we arc_free().
2216 			 */
2217 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2218 			    BP_IDENTITY(zio->io_bp)));
2219 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2220 			    zio->io_bp->blk_birth);
2221 
2222 			ASSERT(refcount_is_zero(&exists->b_refcnt));
2223 			arc_change_state(arc.anon, exists, hash_lock);
2224 			mutex_exit(hash_lock);
2225 			arc_hdr_destroy(exists);
2226 			exists = buf_hash_insert(hdr, &hash_lock);
2227 			ASSERT3P(exists, ==, NULL);
2228 		}
2229 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2230 		arc_access(hdr, hash_lock);
2231 		mutex_exit(hash_lock);
2232 	} else if (acb->acb_done == NULL) {
2233 		int destroy_hdr;
2234 		/*
2235 		 * This is an anonymous buffer with no user callback,
2236 		 * destroy it if there are no active references.
2237 		 */
2238 		mutex_enter(&arc_eviction_mtx);
2239 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
2240 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2241 		mutex_exit(&arc_eviction_mtx);
2242 		if (destroy_hdr)
2243 			arc_hdr_destroy(hdr);
2244 	} else {
2245 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2246 	}
2247 
2248 	if (acb->acb_done) {
2249 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2250 		acb->acb_done(zio, buf, acb->acb_private);
2251 	}
2252 
2253 	kmem_free(acb, sizeof (arc_callback_t));
2254 }
2255 
2256 int
2257 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2258     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2259     arc_done_func_t *done, void *private, int priority, int flags,
2260     uint32_t arc_flags, zbookmark_t *zb)
2261 {
2262 	arc_buf_hdr_t *hdr = buf->b_hdr;
2263 	arc_callback_t	*acb;
2264 	zio_t	*rzio;
2265 
2266 	/* this is a private buffer - no locking required */
2267 	ASSERT3P(hdr->b_state, ==, arc.anon);
2268 	ASSERT(BUF_EMPTY(hdr));
2269 	ASSERT(!HDR_IO_ERROR(hdr));
2270 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
2271 	ASSERT(hdr->b_acb == 0);
2272 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2273 	acb->acb_done = done;
2274 	acb->acb_private = private;
2275 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
2276 	hdr->b_acb = acb;
2277 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
2278 	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
2279 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);
2280 
2281 	if (arc_flags & ARC_WAIT)
2282 		return (zio_wait(rzio));
2283 
2284 	ASSERT(arc_flags & ARC_NOWAIT);
2285 	zio_nowait(rzio);
2286 
2287 	return (0);
2288 }
2289 
2290 int
2291 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2292     zio_done_func_t *done, void *private, uint32_t arc_flags)
2293 {
2294 	arc_buf_hdr_t *ab;
2295 	kmutex_t *hash_lock;
2296 	zio_t	*zio;
2297 
2298 	/*
2299 	 * If this buffer is in the cache, release it, so it
2300 	 * can be re-used.
2301 	 */
2302 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2303 	if (ab != NULL) {
2304 		/*
2305 		 * The checksum of blocks to free is not always
2306  * preserved (e.g. on the deadlist).  However, if it is
2307 		 * nonzero, it should match what we have in the cache.
2308 		 */
2309 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2310 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
2311 		if (ab->b_state != arc.anon)
2312 			arc_change_state(arc.anon, ab, hash_lock);
2313 		if (HDR_IO_IN_PROGRESS(ab)) {
2314 			/*
2315 			 * This should only happen when we prefetch.
2316 			 */
2317 			ASSERT(ab->b_flags & ARC_PREFETCH);
2318 			ASSERT3U(ab->b_datacnt, ==, 1);
2319 			ab->b_flags |= ARC_FREED_IN_READ;
2320 			if (HDR_IN_HASH_TABLE(ab))
2321 				buf_hash_remove(ab);
2322 			ab->b_arc_access = 0;
2323 			bzero(&ab->b_dva, sizeof (dva_t));
2324 			ab->b_birth = 0;
2325 			ab->b_cksum0 = 0;
2326 			ab->b_buf->b_efunc = NULL;
2327 			ab->b_buf->b_private = NULL;
2328 			mutex_exit(hash_lock);
2329 		} else if (refcount_is_zero(&ab->b_refcnt)) {
2330 			mutex_exit(hash_lock);
2331 			arc_hdr_destroy(ab);
2332 			atomic_add_64(&arc.deleted, 1);
2333 		} else {
2334 			/*
2335 			 * We still have an active reference on this
2336 			 * buffer.  This can happen, e.g., from
2337 			 * dbuf_unoverride().
2338 			 */
2339 			ASSERT(!HDR_IN_HASH_TABLE(ab));
2340 			ab->b_arc_access = 0;
2341 			bzero(&ab->b_dva, sizeof (dva_t));
2342 			ab->b_birth = 0;
2343 			ab->b_cksum0 = 0;
2344 			ab->b_buf->b_efunc = NULL;
2345 			ab->b_buf->b_private = NULL;
2346 			mutex_exit(hash_lock);
2347 		}
2348 	}
2349 
2350 	zio = zio_free(pio, spa, txg, bp, done, private);
2351 
2352 	if (arc_flags & ARC_WAIT)
2353 		return (zio_wait(zio));
2354 
2355 	ASSERT(arc_flags & ARC_NOWAIT);
2356 	zio_nowait(zio);
2357 
2358 	return (0);
2359 }
2360 
2361 void
2362 arc_tempreserve_clear(uint64_t tempreserve)
2363 {
2364 	atomic_add_64(&arc_tempreserve, -tempreserve);
2365 	ASSERT((int64_t)arc_tempreserve >= 0);
2366 }
2367 
2368 int
2369 arc_tempreserve_space(uint64_t tempreserve)
2370 {
2371 #ifdef ZFS_DEBUG
2372 	/*
2373 	 * Once in a while, fail for no reason.  Everything should cope.
2374 	 */
2375 	if (spa_get_random(10000) == 0) {
2376 		dprintf("forcing random failure\n");
2377 		return (ERESTART);
2378 	}
2379 #endif
2380 	if (tempreserve > arc.c/4 && !arc.no_grow)
2381 		arc.c = MIN(arc.c_max, tempreserve * 4);
2382 	if (tempreserve > arc.c)
2383 		return (ENOMEM);
2384 
2385 	/*
2386 	 * Throttle writes when the amount of dirty data in the cache
2387 	 * gets too large.  We try to keep the cache less than half full
2388 	 * of dirty blocks so that our sync times don't grow too large.
2389 	 * Note: if two requests come in concurrently, we might let them
2390 	 * both succeed, when one of them should fail.  Not a huge deal.
2391 	 *
2392 	 * XXX The limit should be adjusted dynamically to keep the time
2393 	 * to sync a dataset fixed (around 1-5 seconds?).
2394 	 */
2395 
2396 	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
2397 	    arc_tempreserve + arc.anon->size > arc.c / 4) {
2398 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2399 		    "tempreserve=%lluK arc.c=%lluK\n",
2400 		    arc_tempreserve>>10, arc.anon->lsize>>10,
2401 		    tempreserve>>10, arc.c>>10);
2402 		return (ERESTART);
2403 	}
2404 	atomic_add_64(&arc_tempreserve, tempreserve);
2405 	return (0);
2406 }
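
/*
 * Worked example of the throttle above (numbers illustrative): with
 * arc.c = 1GB, a request with tempreserve = 300MB arriving while
 * arc_tempreserve = 200MB and arc.anon->size = 100MB sees
 * 300 + 200 + 100 = 600MB > arc.c/2 = 512MB and
 * 200 + 100 = 300MB > arc.c/4 = 256MB, so it fails with ERESTART
 * and the caller must retry after some dirty data has synced out.
 */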
2407 
2408 void
2409 arc_init(void)
2410 {
2411 	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
2412 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2413 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2414 
2415 	/* Convert seconds to clock ticks */
2416 	arc_min_prefetch_lifespan = 1 * hz;
2417 
2418 	/* Start out with 1/8 of all memory */
2419 	arc.c = physmem * PAGESIZE / 8;
2420 
2421 #ifdef _KERNEL
2422 	/*
2423 	 * On architectures where the physical memory can be larger
2424 	 * than the addressable space (intel in 32-bit mode), we may
2425 	 * need to limit the cache to 1/8 of VM size.
2426 	 */
2427 	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2428 #endif
2429 
2430 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2431 	arc.c_min = MAX(arc.c / 4, 64<<20);
2432 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2433 	if (arc.c * 8 >= 1<<30)
2434 		arc.c_max = (arc.c * 8) - (1<<30);
2435 	else
2436 		arc.c_max = arc.c_min;
2437 	arc.c_max = MAX(arc.c * 6, arc.c_max);
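
	/*
	 * Worked example (illustrative): with 8GB of physical memory,
	 * arc.c starts at 1GB, so c_min = MAX(256MB, 64MB) = 256MB and
	 * c_max = MAX(6GB, 8GB - 1GB) = 7GB before any tunables apply.
	 */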
2438 
2439 	/*
2440 	 * Allow the tunables to override our calculations if they are
2441  * reasonable (i.e. over 64MB)
2442 	 */
2443 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
2444 		arc.c_max = zfs_arc_max;
2445 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc.c_max)
2446 		arc.c_min = zfs_arc_min;
2447 
2448 	arc.c = arc.c_max;
2449 	arc.p = (arc.c >> 1);
2450 
2451 	/* if kmem_flags are set, lets try to use less memory */
2452 	if (kmem_debugging())
2453 		arc.c = arc.c / 2;
2454 	if (arc.c < arc.c_min)
2455 		arc.c = arc.c_min;
2456 
2457 	arc.anon = &ARC_anon;
2458 	arc.mru = &ARC_mru;
2459 	arc.mru_ghost = &ARC_mru_ghost;
2460 	arc.mfu = &ARC_mfu;
2461 	arc.mfu_ghost = &ARC_mfu_ghost;
2462 	arc.size = 0;
2463 
2464 	arc.hits = 0;
2465 	arc.recycle_miss = 0;
2466 	arc.evict_skip = 0;
2467 	arc.mutex_miss = 0;
2468 
2469 	mutex_init(&arc.anon->mtx, NULL, MUTEX_DEFAULT, NULL);
2470 	mutex_init(&arc.mru->mtx, NULL, MUTEX_DEFAULT, NULL);
2471 	mutex_init(&arc.mru_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
2472 	mutex_init(&arc.mfu->mtx, NULL, MUTEX_DEFAULT, NULL);
2473 	mutex_init(&arc.mfu_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
2474 
2475 	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
2476 	    offsetof(arc_buf_hdr_t, b_arc_node));
2477 	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
2478 	    offsetof(arc_buf_hdr_t, b_arc_node));
2479 	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
2480 	    offsetof(arc_buf_hdr_t, b_arc_node));
2481 	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
2482 	    offsetof(arc_buf_hdr_t, b_arc_node));
2483 
2484 	buf_init();
2485 
2486 	arc_thread_exit = 0;
2487 	arc_eviction_list = NULL;
2488 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
2489 
2490 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2491 	    TS_RUN, minclsyspri);
2492 }
2493 
2494 void
2495 arc_fini(void)
2496 {
2497 	mutex_enter(&arc_reclaim_thr_lock);
2498 	arc_thread_exit = 1;
2499 	while (arc_thread_exit != 0)
2500 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2501 	mutex_exit(&arc_reclaim_thr_lock);
2502 
2503 	arc_flush();
2504 
2505 	arc_dead = TRUE;
2506 
2507 	mutex_destroy(&arc_eviction_mtx);
2508 	mutex_destroy(&arc_reclaim_lock);
2509 	mutex_destroy(&arc_reclaim_thr_lock);
2510 	cv_destroy(&arc_reclaim_thr_cv);
2511 
2512 	list_destroy(&arc.mru->list);
2513 	list_destroy(&arc.mru_ghost->list);
2514 	list_destroy(&arc.mfu->list);
2515 	list_destroy(&arc.mfu_ghost->list);
2516 
2517 	mutex_destroy(&arc.anon->mtx);
2518 	mutex_destroy(&arc.mru->mtx);
2519 	mutex_destroy(&arc.mru_ghost->mtx);
2520 	mutex_destroy(&arc.mfu->mtx);
2521 	mutex_destroy(&arc.mfu_ghost->mtx);
2522 
2523 	buf_fini();
2524 }
2525