xref: /linux/mm/Kconfig (revision c894ec016c9d0418dd832202225a8c64f450d71e)
1# SPDX-License-Identifier: GPL-2.0-only
2
3menu "Memory Management options"
4
5#
6# For some reason microblaze and nios2 hard code SWAP=n.  Hopefully we can
7# add proper SWAP support to them, in which case this can be removed.
8#
9config ARCH_NO_SWAP
10	bool
11
12config ZPOOL
13	bool
14
15menuconfig SWAP
16	bool "Support for paging of anonymous memory (swap)"
17	depends on MMU && BLOCK && !ARCH_NO_SWAP
18	default y
19	help
20	  This option allows you to choose whether you want to have support
21	  for so called swap devices or swap files in your kernel that are
22	  used to provide more virtual memory than the actual RAM present
23	  in your computer.  If unsure say Y.
24
25config ZSWAP
26	bool "Compressed cache for swap pages"
27	depends on SWAP
28	select FRONTSWAP
29	select CRYPTO
30	select ZPOOL
31	help
32	  A lightweight compressed cache for swap pages.  It takes
33	  pages that are in the process of being swapped out and attempts to
34	  compress them into a dynamically allocated RAM-based memory pool.
35	  This can result in a significant I/O reduction on the swap device and,
36	  in the case where decompressing from RAM is faster than swap device
37	  reads, can also improve workload performance.
38
39config ZSWAP_DEFAULT_ON
40	bool "Enable the compressed cache for swap pages by default"
41	depends on ZSWAP
42	help
43	  If selected, the compressed cache for swap pages will be enabled
44	  at boot, otherwise it will be disabled.
45
46	  The selection made here can be overridden by using the kernel
47	  command line 'zswap.enabled=' option.
48
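#
# For example, with ZSWAP built in but ZSWAP_DEFAULT_ON left unset, zswap can
# still be turned on at boot or, on a running system, via the module
# parameter path:
#
#   zswap.enabled=1                                    (kernel command line)
#   echo Y > /sys/module/zswap/parameters/enabled      (at runtime, as root)
#
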
49choice
50	prompt "Default compressor"
51	depends on ZSWAP
52	default ZSWAP_COMPRESSOR_DEFAULT_LZO
53	help
54	  Selects the default compression algorithm for the compressed cache
55	  for swap pages.
56
57	  For an overview of what kind of performance can be expected from
58	  a particular compression algorithm please refer to the benchmarks
59	  available at the following LWN page:
60	  https://lwn.net/Articles/751795/
61
62	  If in doubt, select 'LZO'.
63
64	  The selection made here can be overridden by using the kernel
65	  command line 'zswap.compressor=' option.
66
67config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
68	bool "Deflate"
69	select CRYPTO_DEFLATE
70	help
71	  Use the Deflate algorithm as the default compression algorithm.
72
73config ZSWAP_COMPRESSOR_DEFAULT_LZO
74	bool "LZO"
75	select CRYPTO_LZO
76	help
77	  Use the LZO algorithm as the default compression algorithm.
78
79config ZSWAP_COMPRESSOR_DEFAULT_842
80	bool "842"
81	select CRYPTO_842
82	help
83	  Use the 842 algorithm as the default compression algorithm.
84
85config ZSWAP_COMPRESSOR_DEFAULT_LZ4
86	bool "LZ4"
87	select CRYPTO_LZ4
88	help
89	  Use the LZ4 algorithm as the default compression algorithm.
90
91config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
92	bool "LZ4HC"
93	select CRYPTO_LZ4HC
94	help
95	  Use the LZ4HC algorithm as the default compression algorithm.
96
97config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
98	bool "zstd"
99	select CRYPTO_ZSTD
100	help
101	  Use the zstd algorithm as the default compression algorithm.
102endchoice
103
104config ZSWAP_COMPRESSOR_DEFAULT
105       string
106       depends on ZSWAP
107       default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
108       default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
109       default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
110       default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
111       default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
112       default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
113       default ""
114
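#
# For example, the build-time default chosen above can be overridden at boot
# with zswap.compressor=, provided the corresponding crypto algorithm is
# available (e.g. CRYPTO_ZSTD for zstd):
#
#   zswap.compressor=zstd
#
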
115choice
116	prompt "Default allocator"
117	depends on ZSWAP
118	default ZSWAP_ZPOOL_DEFAULT_ZBUD
119	help
120	  Selects the default allocator for the compressed cache for
121	  swap pages.
122	  The default is 'zbud' for compatibility; however, please do
123	  read the description of each of the allocators below before
124	  making your choice.
125
126	  The selection made here can be overridden by using the kernel
127	  command line 'zswap.zpool=' option.
128
129config ZSWAP_ZPOOL_DEFAULT_ZBUD
130	bool "zbud"
131	select ZBUD
132	help
133	  Use the zbud allocator as the default allocator.
134
135config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
136	bool "z3fold"
137	select Z3FOLD
138	help
139	  Use the z3fold allocator as the default allocator.
140
141config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
142	bool "zsmalloc"
143	select ZSMALLOC
144	help
145	  Use the zsmalloc allocator as the default allocator.
146endchoice
147
148config ZSWAP_ZPOOL_DEFAULT
149       string
150       depends on ZSWAP
151       default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
152       default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
153       default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
154       default ""
155
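#
# For example, the default allocator can likewise be overridden at boot with
# zswap.zpool=, as long as the chosen allocator is built in (e.g. ZSMALLOC):
#
#   zswap.zpool=zsmalloc
#
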
156config ZBUD
157	tristate "2:1 compression allocator (zbud)"
158	depends on ZSWAP
159	help
160	  A special purpose allocator for storing compressed pages.
161	  It is designed to store up to two compressed pages per physical
162	  page.  While this design limits storage density, it has simple and
163	  deterministic reclaim properties that make it preferable to a higher
164	  density approach when reclaim will be used.
165
166config Z3FOLD
167	tristate "3:1 compression allocator (z3fold)"
168	depends on ZSWAP
169	help
170	  A special purpose allocator for storing compressed pages.
171	  It is designed to store up to three compressed pages per physical
172	  page. It is a ZBUD derivative so the simplicity and determinism are
173	  still there.
174
175config ZSMALLOC
176	tristate
177	prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
178	depends on MMU
179	help
180	  zsmalloc is a slab-based memory allocator designed to store
181	  pages of various compression levels efficiently. It achieves
182	  the highest storage density with the least amount of fragmentation.
183
184config ZSMALLOC_STAT
185	bool "Export zsmalloc statistics"
186	depends on ZSMALLOC
187	select DEBUG_FS
188	help
189	  This option enables code in zsmalloc to collect various
190	  statistics about what's happening in zsmalloc and exports that
191	  information to userspace via debugfs.
192	  If unsure, say N.
193
194config ZSMALLOC_CHAIN_SIZE
195	int "Maximum number of physical pages per-zspage"
196	default 8
197	range 4 16
198	depends on ZSMALLOC
199	help
200	  This option sets the upper limit on the number of physical pages
201	  that a zsmalloc page (zspage) can consist of. The optimal zspage
202	  chain size is calculated for each size class during the
203	  initialization of the pool.
204
205	  Changing this option can alter the characteristics of size classes,
206	  such as the number of pages per zspage and the number of objects
207	  per zspage. This can also result in different configurations of
208	  the pool, as zsmalloc merges size classes with similar
209	  characteristics.
210
211	  For more information, see zsmalloc documentation.
212
213menu "SLAB allocator options"
214
215choice
216	prompt "Choose SLAB allocator"
217	default SLUB
218	help
219	   This option allows you to select a slab allocator.
220
221config SLAB
222	bool "SLAB"
223	depends on !PREEMPT_RT
224	select HAVE_HARDENED_USERCOPY_ALLOCATOR
225	help
226	  The regular slab allocator that is established and known to work
227	  well in all environments. It organizes cache hot objects in
228	  per cpu and per node queues.
229
230config SLUB
231	bool "SLUB (Unqueued Allocator)"
232	select HAVE_HARDENED_USERCOPY_ALLOCATOR
233	help
234	   SLUB is a slab allocator that minimizes cache line usage
235	   instead of managing queues of cached objects (SLAB approach).
236	   Per cpu caching is realized using slabs of objects instead
237	   of queues of objects. SLUB can use memory efficiently
238	   and has enhanced diagnostics. SLUB is the default choice for
239	   a slab allocator.
240
241endchoice
242
243config SLUB_TINY
244	bool "Configure SLUB for minimal memory footprint"
245	depends on SLUB && EXPERT
246	select SLAB_MERGE_DEFAULT
247	help
248	   Configures the SLUB allocator in a way to achieve minimal memory
249	   footprint, sacrificing scalability, debugging and other features.
250	   This is intended only for the smallest systems that had used the
251	   SLOB allocator and is not recommended for systems with more than
252	   16MB RAM.
253
254	   If unsure, say N.
255
256config SLAB_MERGE_DEFAULT
257	bool "Allow slab caches to be merged"
258	default y
259	depends on SLAB || SLUB
260	help
261	  For reduced kernel memory fragmentation, slab caches can be
262	  merged when they share the same size and other characteristics.
263	  This carries a risk of kernel heap overflows being able to
264	  overwrite objects from merged caches (and more easily control
265	  cache layout), which makes such heap attacks easier to exploit
266	  by attackers. By keeping caches unmerged, these kinds of exploits
267	  can usually only damage objects in the same cache. To disable
268	  merging at runtime, "slab_nomerge" can be passed on the kernel
269	  command line.
270
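#
# For example, on a kernel built with SLAB_MERGE_DEFAULT=y, merging can still
# be disabled for a single boot with:
#
#   slab_nomerge
#
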
271config SLAB_FREELIST_RANDOM
272	bool "Randomize slab freelist"
273	depends on SLAB || (SLUB && !SLUB_TINY)
274	help
275	  Randomizes the freelist order used when creating new pages. This
276	  security feature reduces the predictability of the kernel slab
277	  allocator against heap overflows.
278
279config SLAB_FREELIST_HARDENED
280	bool "Harden slab freelist metadata"
281	depends on SLAB || (SLUB && !SLUB_TINY)
282	help
283	  Many kernel heap attacks try to target slab cache metadata and
284	  other infrastructure. This option makes minor performance
285	  sacrifices to harden the kernel slab allocator against common
286	  freelist exploit methods. Some slab implementations have more
287	  sanity-checking than others. This option is most effective with
288	  CONFIG_SLUB.
289
290config SLUB_STATS
291	default n
292	bool "Enable SLUB performance statistics"
293	depends on SLUB && SYSFS && !SLUB_TINY
294	help
295	  SLUB statistics are useful to debug SLUB's allocation behavior in
296	  order to find ways to optimize the allocator. This should never be
297	  enabled for production use since keeping statistics slows down
298	  the allocator by a few percentage points. The slabinfo command
299	  supports the determination of the most active slabs to figure
300	  out which slabs are relevant to a particular load.
301	  Try running: slabinfo -DA
302
303config SLUB_CPU_PARTIAL
304	default y
305	depends on SLUB && SMP && !SLUB_TINY
306	bool "SLUB per cpu partial cache"
307	help
308	  Per cpu partial caches accelerate object allocation and freeing
309	  that is local to a processor at the price of more indeterminism
310	  in the latency of the free. On overflow these caches will be cleared
311	  which requires the taking of locks that may cause latency spikes.
312	  Typically one would choose no for a realtime system.
313
314endmenu # SLAB allocator options
315
316config SHUFFLE_PAGE_ALLOCATOR
317	bool "Page allocator randomization"
318	default SLAB_FREELIST_RANDOM && ACPI_NUMA
319	help
320	  Randomization of the page allocator improves the average
321	  utilization of a direct-mapped memory-side-cache. See section
322	  5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
323	  6.2a specification for an example of how a platform advertises
324	  the presence of a memory-side-cache. There are also incidental
325	  security benefits as it reduces the predictability of page
326	  allocations to complement SLAB_FREELIST_RANDOM, but the
327	  default granularity of shuffling on MAX_ORDER, i.e. 10th
328	  order of pages is selected based on cache utilization benefits
329	  on x86.
330
331	  While the randomization improves cache utilization it may
332	  negatively impact workloads on platforms without a cache. For
333	  this reason, by default, the randomization is enabled only
334	  after runtime detection of a direct-mapped memory-side-cache.
335	  Otherwise, the randomization may be force enabled with the
336	  'page_alloc.shuffle' kernel command line parameter.
337
338	  Say Y if unsure.
339
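#
# For example, shuffling can be force-enabled on a platform without a
# detected memory-side-cache by booting with:
#
#   page_alloc.shuffle=1
#
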
340config COMPAT_BRK
341	bool "Disable heap randomization"
342	default y
343	help
344	  Randomizing heap placement makes heap exploits harder, but it
345	  also breaks ancient binaries (including anything libc5 based).
346	  This option changes the bootup default to heap randomization
347	  disabled, and can be overridden at runtime by setting
348	  /proc/sys/kernel/randomize_va_space to 2.
349
350	  On non-ancient distros (post-2000 ones) N is usually a safe choice.
351
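#
# For example, on a kernel built with COMPAT_BRK=y, full address space
# randomization (including the heap) can be re-enabled at runtime with:
#
#   sysctl -w kernel.randomize_va_space=2
#
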
352config MMAP_ALLOW_UNINITIALIZED
353	bool "Allow mmapped anonymous memory to be uninitialized"
354	depends on EXPERT && !MMU
355	default n
356	help
357	  Normally, and according to the Linux spec, anonymous memory obtained
358	  from mmap() has its contents cleared before it is passed to
359	  userspace.  Enabling this config option allows you to request that
360	  mmap() skip that if it is given the MAP_UNINITIALIZED flag, thus
361	  providing a huge performance boost.  If this option is not enabled,
362	  then the flag will be ignored.
363
364	  This is taken advantage of by uClibc's malloc(), and also by
365	  ELF-FDPIC binfmt's brk and stack allocator.
366
367	  Because of the obvious security issues, this option should only be
368	  enabled on embedded devices where you control what is run in
369	  userspace.  Since that isn't generally a problem on no-MMU systems,
370	  it is normally safe to say Y here.
371
372	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
373
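#
# Illustrative userspace usage (a sketch; only honoured when this option is
# enabled on a no-MMU kernel, otherwise the flag is silently ignored):
#
#   void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
#                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
#
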
374config SELECT_MEMORY_MODEL
375	def_bool y
376	depends on ARCH_SELECT_MEMORY_MODEL
377
378choice
379	prompt "Memory model"
380	depends on SELECT_MEMORY_MODEL
381	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
382	default FLATMEM_MANUAL
383	help
384	  This option allows you to change some of the ways that
385	  Linux manages its memory internally. Most users will
386	  only have one option here selected by the architecture
387	  configuration. This is normal.
388
389config FLATMEM_MANUAL
390	bool "Flat Memory"
391	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
392	help
393	  This option is best suited for non-NUMA systems with
394	  flat address space. FLATMEM is the most efficient
395	  memory model in terms of performance and resource consumption
396	  and it is the best option for smaller systems.
397
398	  For systems that have holes in their physical address
399	  spaces and for features like NUMA and memory hotplug,
400	  choose "Sparse Memory".
401
402	  If unsure, choose this option (Flat Memory) over any other.
403
404config SPARSEMEM_MANUAL
405	bool "Sparse Memory"
406	depends on ARCH_SPARSEMEM_ENABLE
407	help
408	  This will be the only option for some systems, including
409	  memory hot-plug systems.  This is normal.
410
411	  This option provides efficient support for systems with
412	  holes in their physical address space and allows memory
413	  hot-plug and hot-remove.
414
415	  If unsure, choose "Flat Memory" over this option.
416
417endchoice
418
419config SPARSEMEM
420	def_bool y
421	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
422
423config FLATMEM
424	def_bool y
425	depends on !SPARSEMEM || FLATMEM_MANUAL
426
427#
428# SPARSEMEM_EXTREME (which is the default) does some bootmem
429# allocations when sparse_init() is called.  If this cannot
430# be done on your architecture, select this option.  However,
431# statically allocating the mem_section[] array can potentially
432# consume vast quantities of .bss, so be careful.
433#
434# This option will also potentially produce smaller runtime code
435# with gcc 3.4 and later.
436#
437config SPARSEMEM_STATIC
438	bool
439
440#
441# Architecture platforms which require a two level mem_section in SPARSEMEM
442# must select this option. This is usually for architecture platforms with
443# an extremely sparse physical address space.
444#
445config SPARSEMEM_EXTREME
446	def_bool y
447	depends on SPARSEMEM && !SPARSEMEM_STATIC
448
449config SPARSEMEM_VMEMMAP_ENABLE
450	bool
451
452config SPARSEMEM_VMEMMAP
453	bool "Sparse Memory virtual memmap"
454	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
455	default y
456	help
457	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
458	  pfn_to_page and page_to_pfn operations.  This is the most
459	  efficient option when sufficient kernel resources are available.
460#
461# Select this config option from the architecture Kconfig if the architecture
462# wants to enable the HugeTLB/dev_dax vmemmap optimization feature.
463#
464config ARCH_WANT_OPTIMIZE_VMEMMAP
465	bool
466
467config HAVE_MEMBLOCK_PHYS_MAP
468	bool
469
470config HAVE_FAST_GUP
471	depends on MMU
472	bool
473
474# Don't discard allocated memory used to track "memory" and "reserved" memblocks
475# after early boot, so it can still be used to test for validity of memory.
476# Also, memblocks are updated with memory hot(un)plug.
477config ARCH_KEEP_MEMBLOCK
478	bool
479
480# Keep arch NUMA mapping infrastructure post-init.
481config NUMA_KEEP_MEMINFO
482	bool
483
484config MEMORY_ISOLATION
485	bool
486
487# IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
488# IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
489# /dev/mem.
490config EXCLUSIVE_SYSTEM_RAM
491	def_bool y
492	depends on !DEVMEM || STRICT_DEVMEM
493
494#
495# Only to be set on architectures that have completely implemented the
496# memory hotplug feature. If you are not sure, don't touch it.
497#
498config HAVE_BOOTMEM_INFO_NODE
499	def_bool n
500
501config ARCH_ENABLE_MEMORY_HOTPLUG
502	bool
503
504config ARCH_ENABLE_MEMORY_HOTREMOVE
505	bool
506
507# eventually, we can have this option just 'select SPARSEMEM'
508menuconfig MEMORY_HOTPLUG
509	bool "Memory hotplug"
510	select MEMORY_ISOLATION
511	depends on SPARSEMEM
512	depends on ARCH_ENABLE_MEMORY_HOTPLUG
513	depends on 64BIT
514	select NUMA_KEEP_MEMINFO if NUMA
515
516if MEMORY_HOTPLUG
517
518config MEMORY_HOTPLUG_DEFAULT_ONLINE
519	bool "Online the newly added memory blocks by default"
520	depends on MEMORY_HOTPLUG
521	help
522	  This option sets the default value of the memory hotplug onlining
523	  policy (/sys/devices/system/memory/auto_online_blocks), which
524	  determines what happens to newly added memory regions. The policy
525	  can always be changed at runtime.
526	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
527
528	  Say Y here if you want all hot-plugged memory blocks to appear in
529	  'online' state by default.
530	  Say N here if you want the default policy to keep all hot-plugged
531	  memory blocks in 'offline' state.
532
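#
# For example, the runtime onlining policy mentioned above can be switched
# with:
#
#   echo online > /sys/devices/system/memory/auto_online_blocks
#
# (valid values include "offline", "online", "online_kernel" and
# "online_movable")
#
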
533config MEMORY_HOTREMOVE
534	bool "Allow for memory hot remove"
535	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
536	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
537	depends on MIGRATION
538
539config MHP_MEMMAP_ON_MEMORY
540	def_bool y
541	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
542	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
543
544endif # MEMORY_HOTPLUG
545
546# Heavily threaded applications may benefit from splitting the mm-wide
547# page_table_lock, so that faults on different parts of the user address
548# space can be handled with less contention: split it at this NR_CPUS.
549# Default to 4 for wider testing, though 8 might be more appropriate.
550# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
551# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
552# SPARC32 allocates multiple pte tables within a single page, and therefore
553# a per-page lock leads to problems when multiple tables need to be locked
554# at the same time (e.g. copy_page_range()).
555# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
556#
557config SPLIT_PTLOCK_CPUS
558	int
559	default "999999" if !MMU
560	default "999999" if ARM && !CPU_CACHE_VIPT
561	default "999999" if PARISC && !PA20
562	default "999999" if SPARC32
563	default "4"
564
565config ARCH_ENABLE_SPLIT_PMD_PTLOCK
566	bool
567
568#
569# support for memory balloon
570config MEMORY_BALLOON
571	bool
572
573#
574# support for memory balloon compaction
575config BALLOON_COMPACTION
576	bool "Allow for balloon memory compaction/migration"
577	def_bool y
578	depends on COMPACTION && MEMORY_BALLOON
579	help
580	  Memory fragmentation introduced by ballooning might significantly
581	  reduce the number of 2MB contiguous memory blocks that can be
582	  used within a guest, thus imposing performance penalties associated
583	  with the reduced number of transparent huge pages that could be used
584	  by the guest workload. Allowing compaction & migration of memory
585	  pages enlisted as being part of memory balloon devices avoids the
586	  aforementioned scenario and helps improve memory defragmentation.
587
588#
589# support for memory compaction
590config COMPACTION
591	bool "Allow for memory compaction"
592	def_bool y
593	select MIGRATION
594	depends on MMU
595	help
596	  Compaction is the only memory management component to form
597	  high order (larger physically contiguous) memory blocks
598	  reliably. The page allocator relies on compaction heavily and
599	  the lack of the feature can lead to unexpected OOM killer
600	  invocations for high order memory requests. You shouldn't
601	  disable this option unless there really is a strong reason for
602	  it; in that case we would be interested to hear about it at
603	  linux-mm@kvack.org.
604
605config COMPACT_UNEVICTABLE_DEFAULT
606	int
607	depends on COMPACTION
608	default 0 if PREEMPT_RT
609	default 1
610
611#
612# support for free page reporting
613config PAGE_REPORTING
614	bool "Free page reporting"
615	def_bool n
616	help
617	  Free page reporting allows for the incremental acquisition of
618	  free pages from the buddy allocator for the purpose of reporting
619	  those pages to another entity, such as a hypervisor, so that the
620	  memory can be freed within the host for other uses.
621
622#
623# support for page migration
624#
625config MIGRATION
626	bool "Page migration"
627	def_bool y
628	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
629	help
630	  Allows the migration of the physical location of pages of processes
631	  while the virtual addresses are not changed. This is useful in
632	  two situations. The first is on NUMA systems to put pages nearer
633	  to the processors accessing them. The second is when allocating huge
634	  pages as migration can relocate pages to satisfy a huge page
635	  allocation instead of reclaiming.
636
637config DEVICE_MIGRATION
638	def_bool MIGRATION && ZONE_DEVICE
639
640config ARCH_ENABLE_HUGEPAGE_MIGRATION
641	bool
642
643config ARCH_ENABLE_THP_MIGRATION
644	bool
645
646config HUGETLB_PAGE_SIZE_VARIABLE
647	def_bool n
648	help
649	  Allows the pageblock_order value to be dynamic instead of just standard
650	  HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
651	  on a platform.
652
653	  Note that the pageblock_order cannot exceed MAX_ORDER and will be
654	  clamped down to MAX_ORDER.
655
656config CONTIG_ALLOC
657	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
658
659config PHYS_ADDR_T_64BIT
660	def_bool 64BIT
661
662config BOUNCE
663	bool "Enable bounce buffers"
664	default y
665	depends on BLOCK && MMU && HIGHMEM
666	help
667	  Enable bounce buffers for devices that cannot access the full range of
668	  memory available to the CPU. Enabled by default when HIGHMEM is
669	  selected, but you may say n to override this.
670
671config MMU_NOTIFIER
672	bool
673	select INTERVAL_TREE
674
675config KSM
676	bool "Enable KSM for page merging"
677	depends on MMU
678	select XXHASH
679	help
680	  Enable Kernel Samepage Merging: KSM periodically scans those areas
681	  of an application's address space that an app has advised may be
682	  mergeable.  When it finds pages of identical content, it replaces
683	  the many instances with a single page of that content, thus
684	  saving memory until one or another app needs to modify the content.
685	  Recommended for use with KVM, or with other duplicative applications.
686	  See Documentation/mm/ksm.rst for more information: KSM is inactive
687	  until a program has madvised that an area is MADV_MERGEABLE, and
688	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
689
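#
# For example, after an application has marked a region mergeable with
# madvise(addr, len, MADV_MERGEABLE), scanning is started as root with:
#
#   echo 1 > /sys/kernel/mm/ksm/run
#
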
690config DEFAULT_MMAP_MIN_ADDR
691	int "Low address space to protect from user allocation"
692	depends on MMU
693	default 4096
694	help
695	  This is the portion of low virtual memory which should be protected
696	  from userspace allocation.  Keeping a user from writing to low pages
697	  can help reduce the impact of kernel NULL pointer bugs.
698
699	  For most ia64, ppc64 and x86 users with lots of address space
700	  a value of 65536 is reasonable and should cause no problems.
701	  On arm and other archs it should not be higher than 32768.
702	  Programs which use vm86 functionality or have some need to map
703	  this low address space will need CAP_SYS_RAWIO or disable this
704	  protection by setting the value to 0.
705
706	  This value can be changed after boot using the
707	  /proc/sys/vm/mmap_min_addr tunable.
708
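#
# For example, the protected range can be raised on a running system with:
#
#   sysctl -w vm.mmap_min_addr=65536
#
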
709config ARCH_SUPPORTS_MEMORY_FAILURE
710	bool
711
712config MEMORY_FAILURE
713	depends on MMU
714	depends on ARCH_SUPPORTS_MEMORY_FAILURE
715	bool "Enable recovery from hardware memory errors"
716	select MEMORY_ISOLATION
717	select RAS
718	help
719	  Enables code to recover from some memory failures on systems
720	  with MCA recovery. This allows a system to continue running
721	  even when some of its memory has uncorrected errors. This requires
722	  special hardware support and typically ECC memory.
723
724config HWPOISON_INJECT
725	tristate "HWPoison pages injector"
726	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
727	select PROC_PAGE_MONITOR
728
729config NOMMU_INITIAL_TRIM_EXCESS
730	int "Turn on mmap() excess space trimming before booting"
731	depends on !MMU
732	default 1
733	help
734	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
735	  of memory on which to store mappings, but it can only ask the system
736	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
737	  more than it requires.  To deal with this, mmap() is able to trim off
738	  the excess and return it to the allocator.
739
740	  If trimming is enabled, the excess is trimmed off and returned to the
741	  system allocator, which can cause extra fragmentation, particularly
742	  if there are a lot of transient processes.
743
744	  If trimming is disabled, the excess is kept, but not used, which for
745	  long-term mappings means that the space is wasted.
746
747	  Trimming can be dynamically controlled through a sysctl option
748	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
749	  excess pages there must be before trimming should occur, or zero if
750	  no trimming is to occur.
751
752	  This option specifies the initial value of this sysctl.  The default
753	  of 1 says that all excess pages should be trimmed.
754
755	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
756
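#
# For example, trimming can be disabled entirely on a running no-MMU system
# with:
#
#   echo 0 > /proc/sys/vm/nr_trim_pages
#
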
757config ARCH_WANT_GENERAL_HUGETLB
758	bool
759
760config ARCH_WANTS_THP_SWAP
761	def_bool n
762
763menuconfig TRANSPARENT_HUGEPAGE
764	bool "Transparent Hugepage Support"
765	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
766	select COMPACTION
767	select XARRAY_MULTI
768	help
769	  Transparent Hugepages allows the kernel to use huge pages and
770	  huge TLB entries transparently for applications whenever possible.
771	  This feature can improve computing performance of certain
772	  applications by speeding up page faults during memory
773	  allocation, by reducing the number of TLB misses and by speeding
774	  up pagetable walking.
775
776	  If you are memory constrained on an embedded system, you may want to say N.
777
778if TRANSPARENT_HUGEPAGE
779
780choice
781	prompt "Transparent Hugepage Support sysfs defaults"
782	depends on TRANSPARENT_HUGEPAGE
783	default TRANSPARENT_HUGEPAGE_ALWAYS
784	help
785	  Selects the sysfs defaults for Transparent Hugepage Support.
786
787	config TRANSPARENT_HUGEPAGE_ALWAYS
788		bool "always"
789	help
790	  Enabling Transparent Hugepage always can increase the
791	  memory footprint of applications without a guaranteed
792	  benefit but it will work automatically for all applications.
793
794	config TRANSPARENT_HUGEPAGE_MADVISE
795		bool "madvise"
796	help
797	  Enabling Transparent Hugepage madvise will only provide a
798	  performance benefit to the applications using
799	  madvise(MADV_HUGEPAGE), but it won't risk increasing the
800	  memory footprint of applications without a guaranteed
801	  benefit.
802endchoice
803
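#
# For example, whichever sysfs default is chosen above, the policy can be
# changed at runtime:
#
#   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
#
# (accepted values: always, madvise, never)
#
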
804config THP_SWAP
805	def_bool y
806	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP && 64BIT
807	help
808	  Swap transparent huge pages in one piece, without splitting.
809	  XXX: For now, swap cluster backing transparent huge page
810	  will be split after swapout.
811
812	  For selection by architectures with reasonable THP sizes.
813
814config READ_ONLY_THP_FOR_FS
815	bool "Read-only THP for filesystems (EXPERIMENTAL)"
816	depends on TRANSPARENT_HUGEPAGE && SHMEM
817
818	help
819	  Allow khugepaged to put read-only file-backed pages in THP.
820
821	  This is marked experimental because it is a new feature. Write
822	  support of file THPs will be developed in the next few release
823	  cycles.
824
825endif # TRANSPARENT_HUGEPAGE
826
827#
828# UP and nommu archs use km based percpu allocator
829#
830config NEED_PER_CPU_KM
831	depends on !SMP || !MMU
832	bool
833	default y
834
835config NEED_PER_CPU_EMBED_FIRST_CHUNK
836	bool
837
838config NEED_PER_CPU_PAGE_FIRST_CHUNK
839	bool
840
841config USE_PERCPU_NUMA_NODE_ID
842	bool
843
844config HAVE_SETUP_PER_CPU_AREA
845	bool
846
847config FRONTSWAP
848	bool
849
850config CMA
851	bool "Contiguous Memory Allocator"
852	depends on MMU
853	select MIGRATION
854	select MEMORY_ISOLATION
855	help
856	  This enables the Contiguous Memory Allocator which allows other
857	  subsystems to allocate big physically-contiguous blocks of memory.
858	  CMA reserves a region of memory and allows only movable pages to
859	  be allocated from it. This way, the kernel can use the memory for
860	  pagecache and when a subsystem requests a contiguous area, the
861	  allocated pages are migrated away to serve the contiguous request.
862
863	  If unsure, say "n".
864
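#
# For example, on kernels that also enable DMA_CMA, the size of the default
# contiguous area can be set on the kernel command line, e.g.:
#
#   cma=128M
#
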
865config CMA_DEBUG
866	bool "CMA debug messages (DEVELOPMENT)"
867	depends on DEBUG_KERNEL && CMA
868	help
869	  Turns on debug messages in CMA.  This produces KERN_DEBUG
870	  messages for every CMA call as well as various messages while
871	  processing calls such as dma_alloc_from_contiguous().
872	  This option does not affect warning and error messages.
873
874config CMA_DEBUGFS
875	bool "CMA debugfs interface"
876	depends on CMA && DEBUG_FS
877	help
878	  Turns on the DebugFS interface for CMA.
879
880config CMA_SYSFS
881	bool "CMA information through sysfs interface"
882	depends on CMA && SYSFS
883	help
884	  This option exposes some sysfs attributes to get information
885	  from CMA.
886
887config CMA_AREAS
888	int "Maximum count of the CMA areas"
889	depends on CMA
890	default 19 if NUMA
891	default 7
892	help
893	  CMA allows creating CMA areas for a particular purpose, mainly
894	  for use as device private areas. This parameter sets the maximum
895	  number of CMA areas in the system.
896
897	  If unsure, leave the default value "7" in UMA and "19" in NUMA.
898
899config MEM_SOFT_DIRTY
900	bool "Track memory changes"
901	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
902	select PROC_PAGE_MONITOR
903	help
904	  This option enables tracking of memory changes by introducing a
905	  soft-dirty bit on PTEs. This bit is set when someone writes
906	  into a page just like the regular dirty bit, but unlike the latter
907	  it can be cleared by hand.
908
909	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.
910
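#
# For example, soft-dirty tracking is typically driven from userspace by
# clearing the bits and later reading them back from pagemap:
#
#   echo 4 > /proc/<pid>/clear_refs     (clear the soft-dirty bits)
#   # ... then check bit 55 of the relevant entries in /proc/<pid>/pagemap
#
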
911config GENERIC_EARLY_IOREMAP
912	bool
913
914config STACK_MAX_DEFAULT_SIZE_MB
915	int "Default maximum user stack size for 32-bit processes (MB)"
916	default 100
917	range 8 2048
918	depends on STACK_GROWSUP && (!64BIT || COMPAT)
919	help
920	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
921	  user processes when the stack grows upwards (currently only on the
922	  parisc architecture) and the RLIMIT_STACK hard limit is unlimited.
923
924	  A sane initial value is 100 MB.
925
926config DEFERRED_STRUCT_PAGE_INIT
927	bool "Defer initialisation of struct pages to kthreads"
928	depends on SPARSEMEM
929	depends on !NEED_PER_CPU_KM
930	depends on 64BIT
931	select PADATA
932	help
933	  Ordinarily all struct pages are initialised during early boot in a
934	  single thread. On very large machines this can take a considerable
935	  amount of time. If this option is set, large machines will bring up
936	  a subset of memmap at boot and then initialise the rest in parallel.
937	  This has a potential performance impact on tasks running early in the
938	  lifetime of the system until these kthreads finish the
939	  initialisation.
940
941config PAGE_IDLE_FLAG
942	bool
943	select PAGE_EXTENSION if !64BIT
944	help
945	  This adds PG_idle and PG_young flags to 'struct page'.  PTE Accessed
946	  bit writers can set the state of the bit in the flags so that PTE
947	  Accessed bit readers may avoid disturbance.
948
949config IDLE_PAGE_TRACKING
950	bool "Enable idle page tracking"
951	depends on SYSFS && MMU
952	select PAGE_IDLE_FLAG
953	help
954	  This feature allows estimating the number of user pages that have
955	  not been touched during a given period of time. This information can
956	  be useful to tune memory cgroup limits and/or for job placement
957	  within a compute cluster.
958
959	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
960	  more details.
961
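#
# For example, idle page tracking is driven through a bitmap file: write 1s
# to mark pages idle, let the workload run, then re-read; pages accessed in
# the meantime read back as 0:
#
#   /sys/kernel/mm/page_idle/bitmap
#
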
962config ARCH_HAS_CACHE_LINE_SIZE
963	bool
964
965config ARCH_HAS_CURRENT_STACK_POINTER
966	bool
967	help
968	  In support of HARDENED_USERCOPY performing stack variable lifetime
969	  checking, an architecture-agnostic way to find the stack pointer
970	  is needed. Once an architecture defines an unsigned long global
971	  register alias named "current_stack_pointer", this config can be
972	  selected.
973
974config ARCH_HAS_PTE_DEVMAP
975	bool
976
977config ARCH_HAS_ZONE_DMA_SET
978	bool
979
980config ZONE_DMA
981	bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
982	default y if ARM64 || X86
983
984config ZONE_DMA32
985	bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
986	depends on !X86_32
987	default y if ARM64
988
989config ZONE_DEVICE
990	bool "Device memory (pmem, HMM, etc...) hotplug support"
991	depends on MEMORY_HOTPLUG
992	depends on MEMORY_HOTREMOVE
993	depends on SPARSEMEM_VMEMMAP
994	depends on ARCH_HAS_PTE_DEVMAP
995	select XARRAY_MULTI
996
997	help
998	  Device memory hotplug support allows for establishing pmem,
999	  or other device driver discovered memory regions, in the
1000	  memmap. This allows pfn_to_page() lookups of otherwise
1001	  "device-physical" addresses which is needed for using a DAX
1002	  mapping in an O_DIRECT operation, among other things.
1003
1004	  If FS_DAX is enabled, then say Y.
1005
1006#
1007# Helpers to mirror range of the CPU page tables of a process into device page
1008# tables.
1009#
1010config HMM_MIRROR
1011	bool
1012	depends on MMU
1013
1014config GET_FREE_REGION
1015	depends on SPARSEMEM
1016	bool
1017
1018config DEVICE_PRIVATE
1019	bool "Unaddressable device memory (GPU memory, ...)"
1020	depends on ZONE_DEVICE
1021	select GET_FREE_REGION
1022
1023	help
1024	  Allows creation of struct pages to represent unaddressable device
1025	  memory; i.e., memory that is only accessible from the device (or
1026	  group of devices). You likely also want to select HMM_MIRROR.
1027
1028config VMAP_PFN
1029	bool
1030
1031config ARCH_USES_HIGH_VMA_FLAGS
1032	bool
1033config ARCH_HAS_PKEYS
1034	bool
1035
1036config ARCH_USES_PG_ARCH_X
1037	bool
1038	help
1039	  Enable the definition of PG_arch_x page flags with x > 1. Only
1040	  suitable for 64-bit architectures with CONFIG_FLATMEM or
1041	  CONFIG_SPARSEMEM_VMEMMAP enabled, otherwise there may not be
1042	  enough room for additional bits in page->flags.
1043
1044config VM_EVENT_COUNTERS
1045	default y
1046	bool "Enable VM event counters for /proc/vmstat" if EXPERT
1047	help
1048	  VM event counters are needed for event counts to be shown.
1049	  This option allows the disabling of the VM event counters
1050	  on EXPERT systems.  /proc/vmstat will only show page counts
1051	  if VM event counters are disabled.
1052
1053config PERCPU_STATS
1054	bool "Collect percpu memory statistics"
1055	help
1056	  This feature collects and exposes statistics via debugfs. The
1057	  information includes global and per chunk statistics, which can
1058	  be used to help understand percpu memory usage.
1059
1060config GUP_TEST
1061	bool "Enable infrastructure for get_user_pages()-related unit tests"
1062	depends on DEBUG_FS
1063	help
1064	  Provides /sys/kernel/debug/gup_test, which in turn provides a way
1065	  to make ioctl calls that can launch kernel-based unit tests for
1066	  the get_user_pages*() and pin_user_pages*() family of API calls.
1067
1068	  These tests include benchmark testing of the _fast variants of
1069	  get_user_pages*() and pin_user_pages*(), as well as smoke tests of
1070	  the non-_fast variants.
1071
1072	  There is also a sub-test that allows running dump_page() on any
1073	  of up to eight pages (selected by command line args) within the
1074	  range of user-space addresses. These pages are either pinned via
1075	  pin_user_pages*(), or pinned via get_user_pages*(), as specified
1076	  by other command line arguments.
1077
1078	  See tools/testing/selftests/mm/gup_test.c
1079
1080comment "GUP_TEST needs to have DEBUG_FS enabled"
1081	depends on !GUP_TEST && !DEBUG_FS
1082
1083config GUP_GET_PXX_LOW_HIGH
1084	bool
1085
1086config DMAPOOL_TEST
1087	tristate "Enable a module to run time tests on dma_pool"
1088	depends on HAS_DMA
1089	help
1090	  Provides a test module that will allocate and free many blocks of
1091	  various sizes and report how long it takes. This is intended to
1092	  provide a consistent way to measure how changes to the
1093	  dma_pool_alloc/free routines affect performance.
1094
1095config ARCH_HAS_PTE_SPECIAL
1096	bool
1097
1098#
1099# Some architectures require a special hugepage directory format that is
1100# required to support multiple hugepage sizes. For example a4fe3ce76
1101# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
1102# introduced it on powerpc.  This allows for a more flexible hugepage
1103# pagetable layouts.
1104#
1105config ARCH_HAS_HUGEPD
1106	bool
1107
1108config MAPPING_DIRTY_HELPERS
1109        bool
1110
1111config KMAP_LOCAL
1112	bool
1113
1114config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
1115	bool
1116
1117# struct io_mapping based helper.  Selected by drivers that need them
1118config IO_MAPPING
1119	bool
1120
1121config SECRETMEM
1122	default y
1123	bool "Enable memfd_secret() system call" if EXPERT
1124	depends on ARCH_HAS_SET_DIRECT_MAP
1125	help
1126	  Enable the memfd_secret() system call with the ability to create
1127	  memory areas visible only in the context of the owning process and
1128	  not mapped to other processes and other kernel page tables.
1129
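#
# Illustrative userspace usage (a sketch; the syscall additionally has to be
# enabled with secretmem.enable=1 on the kernel command line):
#
#   int fd = syscall(SYS_memfd_secret, 0);
#   ftruncate(fd, size);
#   void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
#
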
1130config ANON_VMA_NAME
1131	bool "Anonymous VMA name support"
1132	depends on PROC_FS && ADVISE_SYSCALLS && MMU
1133
1134	help
1135	  Allow naming anonymous virtual memory areas.
1136
1137	  This feature allows assigning names to virtual memory areas. Assigned
1138	  names can be later retrieved from /proc/pid/maps and /proc/pid/smaps
1139	  and help identify individual anonymous memory areas.
1140	  Assigning a name to an anonymous virtual memory area might prevent
1141	  that area from being merged with adjacent virtual memory areas due to
1142	  the difference in their names.
1143
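#
# Illustrative userspace usage (a sketch): a name is attached to an existing
# anonymous mapping with prctl() and then shows up as "[anon:...]" in
# /proc/<pid>/maps:
#
#   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size,
#         (unsigned long)"my buffer");
#
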
1144config USERFAULTFD
1145	bool "Enable userfaultfd() system call"
1146	depends on MMU
1147	help
1148	  Enable the userfaultfd() system call, which allows userland to
1149	  intercept and handle page faults.
1150
1151config HAVE_ARCH_USERFAULTFD_WP
1152	bool
1153	help
1154	  Arch has userfaultfd write protection support
1155
1156config HAVE_ARCH_USERFAULTFD_MINOR
1157	bool
1158	help
1159	  Arch has userfaultfd minor fault support
1160
1161config PTE_MARKER_UFFD_WP
1162	bool "Userfaultfd write protection support for shmem/hugetlbfs"
1163	default y
1164	depends on HAVE_ARCH_USERFAULTFD_WP
1165
1166	help
1167	  Allows creating marker PTEs for userfaultfd write protection
1168	  purposes.  It is required to enable userfaultfd write protection on
1169	  file-backed memory types like shmem and hugetlbfs.
1170
1171# multi-gen LRU {
1172config LRU_GEN
1173	bool "Multi-Gen LRU"
1174	depends on MMU
1175	# make sure folio->flags has enough spare bits
1176	depends on 64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP
1177	help
1178	  A high performance LRU implementation to overcommit memory. See
1179	  Documentation/admin-guide/mm/multigen_lru.rst for details.
1180
1181config LRU_GEN_ENABLED
1182	bool "Enable by default"
1183	depends on LRU_GEN
1184	help
1185	  This option enables the multi-gen LRU by default.
1186
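#
# For example, on a kernel built without LRU_GEN_ENABLED, the multi-gen LRU
# can still be switched on at runtime (see the multigen_lru documentation for
# the component bitmask semantics):
#
#   echo y > /sys/kernel/mm/lru_gen/enabled
#
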
1187config LRU_GEN_STATS
1188	bool "Full stats for debugging"
1189	depends on LRU_GEN
1190	help
1191	  Do not enable this option unless you plan to look at historical stats
1192	  from evicted generations for debugging purposes.
1193
1194	  This option has a per-memcg and per-node memory overhead.
1195# }
1196
1197config ARCH_SUPPORTS_PER_VMA_LOCK
1198       def_bool n
1199
1200config PER_VMA_LOCK
1201	def_bool y
1202	depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
1203	help
1204	  Allow per-vma locking during page fault handling.
1205
1206	  This feature allows locking each virtual memory area separately when
1207	  handling page faults instead of taking mmap_lock.
1208
1209source "mm/damon/Kconfig"
1210
1211endmenu
1212