1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2018, Joyent, Inc. 24 * Copyright (c) 2011, 2019 by Delphix. All rights reserved. 25 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved. 26 * Copyright 2017 Nexenta Systems, Inc. All rights reserved. 27 */ 28 29 #include <sys/spa.h> 30 #include <sys/zio.h> 31 #include <sys/spa_impl.h> 32 #include <sys/zio_compress.h> 33 #include <sys/zio_checksum.h> 34 #include <sys/zfs_context.h> 35 #include <sys/arc.h> 36 #include <sys/zfs_refcount.h> 37 #include <sys/vdev.h> 38 #include <sys/vdev_trim.h> 39 #include <sys/vdev_impl.h> 40 #include <sys/dsl_pool.h> 41 #include <sys/zio_checksum.h> 42 #include <sys/multilist.h> 43 #include <sys/abd.h> 44 #include <sys/zil.h> 45 #include <sys/fm/fs/zfs.h> 46 #ifdef _KERNEL 47 #include <sys/shrinker.h> 48 #include <sys/vmsystm.h> 49 #include <sys/zpl.h> 50 #include <linux/page_compat.h> 51 #endif 52 #include <sys/callb.h> 53 #include <sys/kstat.h> 54 #include <sys/zthr.h> 55 #include <zfs_fletcher.h> 56 #include <sys/arc_impl.h> 57 #include <sys/trace_zfs.h> 58 #include <sys/aggsum.h> 59 60 /* 61 * This is a limit on how many pages the ARC shrinker makes available for 62 * eviction in response to one page allocation attempt. Note that in 63 * practice, the kernel's shrinker can ask us to evict up to about 4x this 64 * for one allocation attempt. 65 * 66 * The default limit of 10,000 (in practice, 160MB per allocation attempt 67 * with 4K pages) limits the amount of time spent attempting to reclaim ARC 68 * memory to less than 100ms per allocation attempt, even with a small 69 * average compressed block size of ~8KB. 70 * 71 * See also the comment in arc_shrinker_count(). 72 * Set to 0 to disable limit. 73 */ 74 int zfs_arc_shrinker_limit = 10000; 75 76 77 /* 78 * Return a default max arc size based on the amount of physical memory. 79 */ 80 uint64_t 81 arc_default_max(uint64_t min, uint64_t allmem) 82 { 83 /* Default to 1/2 of all memory. */ 84 return (MAX(allmem / 2, min)); 85 } 86 87 #ifdef _KERNEL 88 /* 89 * Return maximum amount of memory that we could possibly use. Reduced 90 * to half of all memory in user space which is primarily used for testing. 91 */ 92 uint64_t 93 arc_all_memory(void) 94 { 95 #ifdef CONFIG_HIGHMEM 96 return (ptob(zfs_totalram_pages - zfs_totalhigh_pages)); 97 #else 98 return (ptob(zfs_totalram_pages)); 99 #endif /* CONFIG_HIGHMEM */ 100 } 101 102 /* 103 * Return the amount of memory that is considered free. In user space 104 * which is primarily used for testing we pretend that free memory ranges 105 * from 0-20% of all memory. 106 */ 107 uint64_t 108 arc_free_memory(void) 109 { 110 #ifdef CONFIG_HIGHMEM 111 struct sysinfo si; 112 si_meminfo(&si); 113 return (ptob(si.freeram - si.freehigh)); 114 #else 115 return (ptob(nr_free_pages() + 116 nr_inactive_file_pages() + 117 nr_slab_reclaimable_pages())); 118 #endif /* CONFIG_HIGHMEM */ 119 } 120 121 /* 122 * Return the amount of memory that can be consumed before reclaim will be 123 * needed. Positive if there is sufficient free memory, negative indicates 124 * the amount of memory that needs to be freed up. 125 */ 126 int64_t 127 arc_available_memory(void) 128 { 129 return (arc_free_memory() - arc_sys_free); 130 } 131 132 static uint64_t 133 arc_evictable_memory(void) 134 { 135 int64_t asize = aggsum_value(&arc_size); 136 uint64_t arc_clean = 137 zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) + 138 zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) + 139 zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) + 140 zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 141 uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0); 142 143 /* 144 * Scale reported evictable memory in proportion to page cache, cap 145 * at specified min/max. 146 */ 147 uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent; 148 min = MAX(arc_c_min, MIN(arc_c_max, min)); 149 150 if (arc_dirty >= min) 151 return (arc_clean); 152 153 return (MAX((int64_t)asize - (int64_t)min, 0)); 154 } 155 156 /* 157 * The _count() function returns the number of free-able objects. 158 * The _scan() function returns the number of objects that were freed. 159 */ 160 static unsigned long 161 arc_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) 162 { 163 /* 164 * __GFP_FS won't be set if we are called from ZFS code (see 165 * kmem_flags_convert(), which removes it). To avoid a deadlock, we 166 * don't allow evicting in this case. We return 0 rather than 167 * SHRINK_STOP so that the shrinker logic doesn't accumulate a 168 * deficit against us. 169 */ 170 if (!(sc->gfp_mask & __GFP_FS)) { 171 return (0); 172 } 173 174 /* 175 * This code is reached in the "direct reclaim" case, where the 176 * kernel (outside ZFS) is trying to allocate a page, and the system 177 * is low on memory. 178 * 179 * The kernel's shrinker code doesn't understand how many pages the 180 * ARC's callback actually frees, so it may ask the ARC to shrink a 181 * lot for one page allocation. This is problematic because it may 182 * take a long time, thus delaying the page allocation, and because 183 * it may force the ARC to unnecessarily shrink very small. 184 * 185 * Therefore, we limit the amount of data that we say is evictable, 186 * which limits the amount that the shrinker will ask us to evict for 187 * one page allocation attempt. 188 * 189 * In practice, we may be asked to shrink 4x the limit to satisfy one 190 * page allocation, before the kernel's shrinker code gives up on us. 191 * When that happens, we rely on the kernel code to find the pages 192 * that we freed before invoking the OOM killer. This happens in 193 * __alloc_pages_slowpath(), which retries and finds the pages we 194 * freed when it calls get_page_from_freelist(). 195 * 196 * See also the comment above zfs_arc_shrinker_limit. 197 */ 198 int64_t limit = zfs_arc_shrinker_limit != 0 ? 199 zfs_arc_shrinker_limit : INT64_MAX; 200 return (MIN(limit, btop((int64_t)arc_evictable_memory()))); 201 } 202 203 static unsigned long 204 arc_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) 205 { 206 ASSERT((sc->gfp_mask & __GFP_FS) != 0); 207 208 /* The arc is considered warm once reclaim has occurred */ 209 if (unlikely(arc_warm == B_FALSE)) 210 arc_warm = B_TRUE; 211 212 /* 213 * Evict the requested number of pages by reducing arc_c and waiting 214 * for the requested amount of data to be evicted. 215 */ 216 arc_reduce_target_size(ptob(sc->nr_to_scan)); 217 arc_wait_for_eviction(ptob(sc->nr_to_scan)); 218 if (current->reclaim_state != NULL) 219 current->reclaim_state->reclaimed_slab += sc->nr_to_scan; 220 221 /* 222 * We are experiencing memory pressure which the arc_evict_zthr was 223 * unable to keep up with. Set arc_no_grow to briefly pause arc 224 * growth to avoid compounding the memory pressure. 225 */ 226 arc_no_grow = B_TRUE; 227 228 /* 229 * When direct reclaim is observed it usually indicates a rapid 230 * increase in memory pressure. This occurs because the kswapd 231 * threads were unable to asynchronously keep enough free memory 232 * available. 233 */ 234 if (current_is_kswapd()) { 235 ARCSTAT_BUMP(arcstat_memory_indirect_count); 236 } else { 237 ARCSTAT_BUMP(arcstat_memory_direct_count); 238 } 239 240 return (sc->nr_to_scan); 241 } 242 243 SPL_SHRINKER_DECLARE(arc_shrinker, 244 arc_shrinker_count, arc_shrinker_scan, DEFAULT_SEEKS); 245 246 int 247 arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg) 248 { 249 uint64_t free_memory = arc_free_memory(); 250 251 if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100) 252 return (0); 253 254 if (txg > spa->spa_lowmem_last_txg) { 255 spa->spa_lowmem_last_txg = txg; 256 spa->spa_lowmem_page_load = 0; 257 } 258 /* 259 * If we are in pageout, we know that memory is already tight, 260 * the arc is already going to be evicting, so we just want to 261 * continue to let page writes occur as quickly as possible. 262 */ 263 if (current_is_kswapd()) { 264 if (spa->spa_lowmem_page_load > 265 MAX(arc_sys_free / 4, free_memory) / 4) { 266 DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim); 267 return (SET_ERROR(ERESTART)); 268 } 269 /* Note: reserve is inflated, so we deflate */ 270 atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8); 271 return (0); 272 } else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) { 273 /* memory is low, delay before restarting */ 274 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 275 DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim); 276 return (SET_ERROR(EAGAIN)); 277 } 278 spa->spa_lowmem_page_load = 0; 279 return (0); 280 } 281 282 void 283 arc_lowmem_init(void) 284 { 285 uint64_t allmem = arc_all_memory(); 286 287 /* 288 * Register a shrinker to support synchronous (direct) memory 289 * reclaim from the arc. This is done to prevent kswapd from 290 * swapping out pages when it is preferable to shrink the arc. 291 */ 292 spl_register_shrinker(&arc_shrinker); 293 294 /* 295 * The ARC tries to keep at least this much memory available for the 296 * system. This gives the ARC time to shrink in response to memory 297 * pressure, before running completely out of memory and invoking the 298 * direct-reclaim ARC shrinker. 299 * 300 * This should be more than twice high_wmark_pages(), so that 301 * arc_wait_for_eviction() will wait until at least the 302 * high_wmark_pages() are free (see arc_evict_state_impl()). 303 * 304 * Note: Even when the system is very low on memory, the kernel's 305 * shrinker code may only ask for one "batch" of pages (512KB) to be 306 * evicted. If concurrent allocations consume these pages, there may 307 * still be insufficient free pages, and the OOM killer takes action. 308 * 309 * By setting arc_sys_free large enough, and having 310 * arc_wait_for_eviction() wait until there is at least arc_sys_free/2 311 * free memory, it is much less likely that concurrent allocations can 312 * consume all the memory that was evicted before checking for 313 * OOM. 314 * 315 * It's hard to iterate the zones from a linux kernel module, which 316 * makes it difficult to determine the watermark dynamically. Instead 317 * we compute the maximum high watermark for this system, based 318 * on the amount of memory, assuming default parameters on Linux kernel 319 * 5.3. 320 */ 321 322 /* 323 * Base wmark_low is 4 * the square root of Kbytes of RAM. 324 */ 325 long wmark = 4 * int_sqrt(allmem/1024) * 1024; 326 327 /* 328 * Clamp to between 128K and 64MB. 329 */ 330 wmark = MAX(wmark, 128 * 1024); 331 wmark = MIN(wmark, 64 * 1024 * 1024); 332 333 /* 334 * watermark_boost can increase the wmark by up to 150%. 335 */ 336 wmark += wmark * 150 / 100; 337 338 /* 339 * arc_sys_free needs to be more than 2x the watermark, because 340 * arc_wait_for_eviction() waits for half of arc_sys_free. Bump this up 341 * to 3x to ensure we're above it. 342 */ 343 arc_sys_free = wmark * 3 + allmem / 32; 344 } 345 346 void 347 arc_lowmem_fini(void) 348 { 349 spl_unregister_shrinker(&arc_shrinker); 350 } 351 352 int 353 param_set_arc_long(const char *buf, zfs_kernel_param_t *kp) 354 { 355 int error; 356 357 error = param_set_long(buf, kp); 358 if (error < 0) 359 return (SET_ERROR(error)); 360 361 arc_tuning_update(B_TRUE); 362 363 return (0); 364 } 365 366 int 367 param_set_arc_int(const char *buf, zfs_kernel_param_t *kp) 368 { 369 int error; 370 371 error = param_set_int(buf, kp); 372 if (error < 0) 373 return (SET_ERROR(error)); 374 375 arc_tuning_update(B_TRUE); 376 377 return (0); 378 } 379 #else /* _KERNEL */ 380 int64_t 381 arc_available_memory(void) 382 { 383 int64_t lowest = INT64_MAX; 384 385 /* Every 100 calls, free a small amount */ 386 if (spa_get_random(100) == 0) 387 lowest = -1024; 388 389 return (lowest); 390 } 391 392 int 393 arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg) 394 { 395 return (0); 396 } 397 398 uint64_t 399 arc_all_memory(void) 400 { 401 return (ptob(physmem) / 2); 402 } 403 404 uint64_t 405 arc_free_memory(void) 406 { 407 return (spa_get_random(arc_all_memory() * 20 / 100)); 408 } 409 #endif /* _KERNEL */ 410 411 /* 412 * Helper function for arc_prune_async() it is responsible for safely 413 * handling the execution of a registered arc_prune_func_t. 414 */ 415 static void 416 arc_prune_task(void *ptr) 417 { 418 arc_prune_t *ap = (arc_prune_t *)ptr; 419 arc_prune_func_t *func = ap->p_pfunc; 420 421 if (func != NULL) 422 func(ap->p_adjust, ap->p_private); 423 424 zfs_refcount_remove(&ap->p_refcnt, func); 425 } 426 427 /* 428 * Notify registered consumers they must drop holds on a portion of the ARC 429 * buffered they reference. This provides a mechanism to ensure the ARC can 430 * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This 431 * is analogous to dnlc_reduce_cache() but more generic. 432 * 433 * This operation is performed asynchronously so it may be safely called 434 * in the context of the arc_reclaim_thread(). A reference is taken here 435 * for each registered arc_prune_t and the arc_prune_task() is responsible 436 * for releasing it once the registered arc_prune_func_t has completed. 437 */ 438 void 439 arc_prune_async(int64_t adjust) 440 { 441 arc_prune_t *ap; 442 443 mutex_enter(&arc_prune_mtx); 444 for (ap = list_head(&arc_prune_list); ap != NULL; 445 ap = list_next(&arc_prune_list, ap)) { 446 447 if (zfs_refcount_count(&ap->p_refcnt) >= 2) 448 continue; 449 450 zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc); 451 ap->p_adjust = adjust; 452 if (taskq_dispatch(arc_prune_taskq, arc_prune_task, 453 ap, TQ_SLEEP) == TASKQID_INVALID) { 454 zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc); 455 continue; 456 } 457 ARCSTAT_BUMP(arcstat_prune); 458 } 459 mutex_exit(&arc_prune_mtx); 460 } 461 462 /* BEGIN CSTYLED */ 463 ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, shrinker_limit, INT, ZMOD_RW, 464 "Limit on number of pages that ARC shrinker can reclaim at once"); 465 /* END CSTYLED */ 466