xref: /linux/mm/Kconfig (revision 8804d970fab45726b3c7cd7f240b31122aa94219)
1# SPDX-License-Identifier: GPL-2.0-only
2
3menu "Memory Management options"
4
5#
6# For some reason microblaze and nios2 hard code SWAP=n.  Hopefully we can
7# add proper SWAP support to them, in which case this can be removed.
8#
9config ARCH_NO_SWAP
10	bool
11
12menuconfig SWAP
13	bool "Support for paging of anonymous memory (swap)"
14	depends on MMU && BLOCK && !ARCH_NO_SWAP
15	default y
16	help
17	  This option allows you to choose whether you want to have support
18	  for so-called swap devices or swap files in your kernel that are
19	  used to provide more virtual memory than the actual RAM present
20	  in your computer.  If unsure, say Y.
21
22config ZSWAP
23	bool "Compressed cache for swap pages"
24	depends on SWAP
25	select CRYPTO
26	select ZSMALLOC
27	help
28	  A lightweight compressed cache for swap pages.  It takes
29	  pages that are in the process of being swapped out and attempts to
30	  compress them into a dynamically allocated RAM-based memory pool.
31	  This can result in a significant I/O reduction on the swap device and,
32	  in the case where decompressing from RAM is faster than swap device
33	  reads, can also improve workload performance.
34
35config ZSWAP_DEFAULT_ON
36	bool "Enable the compressed cache for swap pages by default"
37	depends on ZSWAP
38	help
39	  If selected, the compressed cache for swap pages will be enabled
40	  at boot, otherwise it will be disabled.
41
42	  The selection made here can be overridden by using the kernel
43	  command line 'zswap.enabled=' option.
44
45config ZSWAP_SHRINKER_DEFAULT_ON
46	bool "Shrink the zswap pool on memory pressure"
47	depends on ZSWAP
48	default n
49	help
50	  If selected, the zswap shrinker will be enabled, and the pages
51	  stored in the zswap pool will become available for reclaim (i.e.
52	  written back to the backing swap device) on memory pressure.
53
54	  This means that zswap writeback could happen even if the pool is
55	  not yet full, or the cgroup zswap limit has not been reached,
56	  reducing the chance that cold pages will reside in the zswap pool
57	  and consume memory indefinitely.
58
59choice
60	prompt "Default compressor"
61	depends on ZSWAP
62	default ZSWAP_COMPRESSOR_DEFAULT_LZO
63	help
64	  Selects the default compression algorithm for the compressed cache
65	  for swap pages.
66
67	  For an overview of what kind of performance can be expected from
68	  a particular compression algorithm please refer to the benchmarks
69	  available at the following LWN page:
70	  https://lwn.net/Articles/751795/
71
72	  If in doubt, select 'LZO'.
73
74	  The selection made here can be overridden by using the kernel
75	  command line 'zswap.compressor=' option.
76
77config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
78	bool "Deflate"
79	select CRYPTO_DEFLATE
80	help
81	  Use the Deflate algorithm as the default compression algorithm.
82
83config ZSWAP_COMPRESSOR_DEFAULT_LZO
84	bool "LZO"
85	select CRYPTO_LZO
86	help
87	  Use the LZO algorithm as the default compression algorithm.
88
89config ZSWAP_COMPRESSOR_DEFAULT_842
90	bool "842"
91	select CRYPTO_842
92	help
93	  Use the 842 algorithm as the default compression algorithm.
94
95config ZSWAP_COMPRESSOR_DEFAULT_LZ4
96	bool "LZ4"
97	select CRYPTO_LZ4
98	help
99	  Use the LZ4 algorithm as the default compression algorithm.
100
101config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
102	bool "LZ4HC"
103	select CRYPTO_LZ4HC
104	help
105	  Use the LZ4HC algorithm as the default compression algorithm.
106
107config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
108	bool "zstd"
109	select CRYPTO_ZSTD
110	help
111	  Use the zstd algorithm as the default compression algorithm.
112endchoice
113
114config ZSWAP_COMPRESSOR_DEFAULT
115	string
116	depends on ZSWAP
117	default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
118	default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
119	default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
120	default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
121	default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
122	default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
123	default ""
124
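#
# Illustrative note (not part of the upstream help texts): a kernel built
# with, say, ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y resolves the string above to
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd"; the compressor can still be
# overridden at boot with the 'zswap.compressor=' kernel command line
# option mentioned in the choice help text.
#
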
125config ZSMALLOC
126	tristate
127
128if ZSMALLOC
129
130menu "Zsmalloc allocator options"
131	depends on ZSMALLOC
132
133comment "Zsmalloc is a common backend allocator for zswap & zram"
134
135config ZSMALLOC_STAT
136	bool "Export zsmalloc statistics"
137	select DEBUG_FS
138	help
139	  This option enables code in zsmalloc to collect various
140	  statistics about what's happening in zsmalloc and exports that
141	  information to userspace via debugfs.
142	  If unsure, say N.
143
144config ZSMALLOC_CHAIN_SIZE
145	int "Maximum number of physical pages per-zspage"
146	default 8
147	range 4 16
148	help
149	  This option sets the upper limit on the number of physical pages
150	  that a zsmalloc page (zspage) can consist of. The optimal zspage
151	  chain size is calculated for each size class during the
152	  initialization of the pool.
153
154	  Changing this option can alter the characteristics of size classes,
155	  such as the number of pages per zspage and the number of objects
156	  per zspage. This can also result in different configurations of
157	  the pool, as zsmalloc merges size classes with similar
158	  characteristics.
159
160	  For more information, see zsmalloc documentation.
161
162endmenu
163
164endif
165
166menu "Slab allocator options"
167
168config SLUB
169	def_bool y
170	select IRQ_WORK
171
172config KVFREE_RCU_BATCHED
173	def_bool y
174	depends on !SLUB_TINY && !TINY_RCU
175
176config SLUB_TINY
177	bool "Configure for minimal memory footprint"
178	depends on EXPERT && !COMPILE_TEST
179	select SLAB_MERGE_DEFAULT
180	help
181	   Configures the slab allocator in a way to achieve minimal memory
182	   footprint, sacrificing scalability, debugging and other features.
183	   This is intended only for the smallest systems that formerly used
184	   the SLOB allocator and is not recommended for systems with more
185	   than 16MB of RAM.
186
187	   If unsure, say N.
188
189config SLAB_MERGE_DEFAULT
190	bool "Allow slab caches to be merged"
191	default y
192	help
193	  For reduced kernel memory fragmentation, slab caches can be
194	  merged when they share the same size and other characteristics.
195	  This carries a risk of kernel heap overflows being able to
196	  overwrite objects from merged caches (and more easily control
197	  cache layout), which makes such heap attacks easier to exploit
198	  by attackers. By keeping caches unmerged, these kinds of exploits
199	  can usually only damage objects in the same cache. To disable
200	  merging at runtime, "slab_nomerge" can be passed on the kernel
201	  command line.
202
203config SLAB_FREELIST_RANDOM
204	bool "Randomize slab freelist"
205	depends on !SLUB_TINY
206	help
207	  Randomizes the freelist order used on creating new pages. This
208	  security feature reduces the predictability of the kernel slab
209	  allocator against heap overflows.
210
211config SLAB_FREELIST_HARDENED
212	bool "Harden slab freelist metadata"
213	depends on !SLUB_TINY
214	help
215	  Many kernel heap attacks try to target slab cache metadata and
216	  other infrastructure. This option makes minor performance
217	  sacrifices to harden the kernel slab allocator against common
218	  freelist exploit methods.
219
220config SLAB_BUCKETS
221	bool "Support allocation from separate kmalloc buckets"
222	depends on !SLUB_TINY
223	default SLAB_FREELIST_HARDENED
224	help
225	  Kernel heap attacks frequently depend on being able to create
226	  specifically-sized allocations with user-controlled contents
227	  that will be allocated into the same kmalloc bucket as a
228	  target object. To avoid sharing these allocation buckets,
229	  provide an explicitly separated set of buckets to be used for
230	  user-controlled allocations. This may very slightly increase
231	  memory fragmentation, though in practice it's only a handful
232	  of extra pages since the bulk of user-controlled allocations
233	  are relatively long-lived.
234
235	  If unsure, say Y.
236
237config SLUB_STATS
238	default n
239	bool "Enable performance statistics"
240	depends on SYSFS && !SLUB_TINY
241	help
242	  The statistics are useful to debug slab allocation behavior in
243	  order to find ways to optimize the allocator. This should never be
244	  enabled for production use since keeping statistics slows down
245	  the allocator by a few percentage points. The slabinfo command
246	  supports the determination of the most active slabs to figure
247	  out which slabs are relevant to a particular load.
248	  Try running: slabinfo -DA
249
250config SLUB_CPU_PARTIAL
251	default y
252	depends on SMP && !SLUB_TINY
253	bool "Enable per cpu partial caches"
254	help
255	  Per cpu partial caches accelerate object allocation and freeing
256	  that are local to a processor, at the price of more indeterminism
257	  in the latency of the free. On overflow these caches will be cleared
258	  which requires the taking of locks that may cause latency spikes.
259	  Typically one would choose no for a realtime system.
260
261config RANDOM_KMALLOC_CACHES
262	default n
263	depends on !SLUB_TINY
264	bool "Randomize slab caches for normal kmalloc"
265	help
266	  A hardening feature that creates multiple copies of slab caches for
267	  normal kmalloc allocation and makes kmalloc randomly pick one based
268	  on code address, which makes it more difficult for attackers to spray
269	  vulnerable memory objects on the heap for the purpose of exploiting
270	  memory vulnerabilities.
271
272	  Currently the number of copies is set to 16, a reasonably large value
273	  that effectively diverges the memory objects allocated for different
274	  subsystems or modules into different caches, at the expense of a
275	  limited degree of memory and CPU overhead that relates to hardware and
276	  system workload.
277
278endmenu # Slab allocator options
279
280config SHUFFLE_PAGE_ALLOCATOR
281	bool "Page allocator randomization"
282	default SLAB_FREELIST_RANDOM && ACPI_NUMA
283	help
284	  Randomization of the page allocator improves the average
285	  utilization of a direct-mapped memory-side-cache. See section
286	  5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
287	  6.2a specification for an example of how a platform advertises
288	  the presence of a memory-side-cache. There are also incidental
289	  security benefits as it reduces the predictability of page
290	  allocations to complement SLAB_FREELIST_RANDOM, but the
291	  default granularity of shuffling on MAX_PAGE_ORDER, i.e., 10th
292	  order of pages, is selected based on cache utilization benefits
293	  on x86.
294
295	  While the randomization improves cache utilization it may
296	  negatively impact workloads on platforms without a cache. For
297	  this reason, by default, the randomization is not enabled even
298	  if SHUFFLE_PAGE_ALLOCATOR=y. The randomization may be force enabled
299	  with the 'page_alloc.shuffle' kernel command line parameter.
300
301	  Say Y if unsure.
302
303config COMPAT_BRK
304	bool "Disable heap randomization"
305	default y
306	help
307	  Randomizing heap placement makes heap exploits harder, but it
308	  also breaks ancient binaries (including anything libc5 based).
309	  This option changes the bootup default to heap randomization
310	  disabled, and can be overridden at runtime by setting
311	  /proc/sys/kernel/randomize_va_space to 2.
312
313	  On non-ancient distros (post-2000 ones) N is usually a safe choice.
314
315config MMAP_ALLOW_UNINITIALIZED
316	bool "Allow mmapped anonymous memory to be uninitialized"
317	depends on EXPERT && !MMU
318	default n
319	help
320	  Normally, and according to the Linux spec, anonymous memory obtained
321	  from mmap() has its contents cleared before it is passed to
322	  userspace.  Enabling this config option allows you to request that
323	  mmap() skip that if it is given the MAP_UNINITIALIZED flag, thus
324	  providing a huge performance boost.  If this option is not enabled,
325	  then the flag will be ignored.
326
327	  This is taken advantage of by uClibc's malloc(), and also by
328	  ELF-FDPIC binfmt's brk and stack allocator.
329
330	  Because of the obvious security issues, this option should only be
331	  enabled on embedded devices where you control what is run in
332	  userspace.  Since that isn't generally a problem on no-MMU systems,
333	  it is normally safe to say Y here.
334
335	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
336
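#
# Illustrative sketch (not from this file): on a no-MMU kernel built with
# MMAP_ALLOW_UNINITIALIZED=y, an application can request uninitialized
# anonymous memory roughly as in the hypothetical helper below. The flag
# value is taken from the UAPI headers; if the option is disabled, the
# flag is simply ignored and zeroed memory is returned.
#
#	#include <sys/mman.h>
#
#	#ifndef MAP_UNINITIALIZED
#	#define MAP_UNINITIALIZED 0x4000000	/* value from asm-generic/mman-common.h */
#	#endif
#
#	static void *fast_anon_alloc(size_t len)
#	{
#		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
#			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED,
#			       -1, 0);
#		return p == MAP_FAILED ? NULL : p;
#	}
#
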
337config SELECT_MEMORY_MODEL
338	def_bool y
339	depends on ARCH_SELECT_MEMORY_MODEL
340
341choice
342	prompt "Memory model"
343	depends on SELECT_MEMORY_MODEL
344	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
345	default FLATMEM_MANUAL
346	help
347	  This option allows you to change some of the ways that
348	  Linux manages its memory internally. Most users will
349	  only have one option here selected by the architecture
350	  configuration. This is normal.
351
352config FLATMEM_MANUAL
353	bool "Flat Memory"
354	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
355	help
356	  This option is best suited for non-NUMA systems with
357	  flat address space. FLATMEM is the most efficient memory
358	  model in terms of performance and resource consumption
359	  and it is the best option for smaller systems.
360
361	  For systems that have holes in their physical address
362	  spaces and for features like NUMA and memory hotplug,
363	  choose "Sparse Memory".
364
365	  If unsure, choose this option (Flat Memory) over any other.
366
367config SPARSEMEM_MANUAL
368	bool "Sparse Memory"
369	depends on ARCH_SPARSEMEM_ENABLE
370	help
371	  This will be the only option for some systems, including
372	  memory hot-plug systems.  This is normal.
373
374	  This option provides efficient support for systems with
375	  holes in their physical address space and allows memory
376	  hot-plug and hot-remove.
377
378	  If unsure, choose "Flat Memory" over this option.
379
380endchoice
381
382config SPARSEMEM
383	def_bool y
384	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
385
386config FLATMEM
387	def_bool y
388	depends on !SPARSEMEM || FLATMEM_MANUAL
389
390#
391# SPARSEMEM_EXTREME (which is the default) does some bootmem
392# allocations when sparse_init() is called.  If this cannot
393# be done on your architecture, select this option.  However,
394# statically allocating the mem_section[] array can potentially
395# consume vast quantities of .bss, so be careful.
396#
397# This option will also potentially produce smaller runtime code
398# with gcc 3.4 and later.
399#
400config SPARSEMEM_STATIC
401	bool
402
403#
404# Architecture platforms which require a two level mem_section in SPARSEMEM
405# must select this option. This is usually for architecture platforms with
406# an extremely sparse physical address space.
407#
408config SPARSEMEM_EXTREME
409	def_bool y
410	depends on SPARSEMEM && !SPARSEMEM_STATIC
411
412config SPARSEMEM_VMEMMAP_ENABLE
413	bool
414
415config SPARSEMEM_VMEMMAP
416	def_bool y
417	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
418	help
419	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
420	  pfn_to_page and page_to_pfn operations.  This is the most
421	  efficient option when sufficient kernel resources are available.
422
423config SPARSEMEM_VMEMMAP_PREINIT
424	bool
425#
426# Select this config option from the architecture Kconfig, if it is preferred
427# to enable the feature of HugeTLB/dev_dax vmemmap optimization.
428#
429config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
430	bool
431
432config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
433	bool
434
435config ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
436	bool
437
438config HAVE_MEMBLOCK_PHYS_MAP
439	bool
440
441config HAVE_GUP_FAST
442	depends on MMU
443	bool
444
445# Enable memblock support for scratch memory which is needed for kexec handover
446config MEMBLOCK_KHO_SCRATCH
447	bool
448
449# Don't discard allocated memory used to track "memory" and "reserved" memblocks
450# after early boot, so it can still be used to test for validity of memory.
451# Also, memblocks are updated with memory hot(un)plug.
452config ARCH_KEEP_MEMBLOCK
453	bool
454
455# Keep arch NUMA mapping infrastructure post-init.
456config NUMA_KEEP_MEMINFO
457	bool
458
459config MEMORY_ISOLATION
460	bool
461
462# IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
463# IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
464# /dev/mem.
465config EXCLUSIVE_SYSTEM_RAM
466	def_bool y
467	depends on !DEVMEM || STRICT_DEVMEM
468
469#
470# Only be set on architectures that have completely implemented memory hotplug
471# feature. If you are not sure, don't touch it.
472#
473config HAVE_BOOTMEM_INFO_NODE
474	def_bool n
475
476config ARCH_ENABLE_MEMORY_HOTPLUG
477	bool
478
479config ARCH_ENABLE_MEMORY_HOTREMOVE
480	bool
481
482# eventually, we can have this option just 'select SPARSEMEM'
483menuconfig MEMORY_HOTPLUG
484	bool "Memory hotplug"
485	select MEMORY_ISOLATION
486	depends on SPARSEMEM
487	depends on ARCH_ENABLE_MEMORY_HOTPLUG
488	depends on 64BIT
489	select NUMA_KEEP_MEMINFO if NUMA
490
491if MEMORY_HOTPLUG
492
493choice
494	prompt "Memory Hotplug Default Online Type"
495	default MHP_DEFAULT_ONLINE_TYPE_OFFLINE
496	help
497	  Default online type for hotplugged memory.
498
499	  This option sets the default memory hotplug onlining policy
500	  (/sys/devices/system/memory/auto_online_blocks), which determines
501	  what happens to newly added memory regions. The policy can always
502	  be changed at runtime.
503
504	  The default is 'offline'.
505
506	  Select offline to defer onlining to drivers and user policy.
507	  Select auto to let the kernel choose what zones to utilize.
508	  Select online_kernel to generally allow kernel usage of this memory.
509	  Select online_movable to generally disallow kernel usage of this memory.
510
511	  Example kernel usage would be page structs and page tables.
512
513	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
514
515config MHP_DEFAULT_ONLINE_TYPE_OFFLINE
516	bool "offline"
517	help
518	  Hotplugged memory will not be onlined by default.
519	  Choose this for systems with drivers and user policy that
520	  handle the onlining of hotplugged memory.
521
522config MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO
523	bool "auto"
524	help
525	  Select this if you want the kernel to automatically online
526	  hotplugged memory into the zone it thinks is reasonable.
527	  This memory may be utilized for kernel data.
528
529config MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL
530	bool "kernel"
531	help
532	  Select this if you want the kernel to automatically online
533	  hotplugged memory into a zone capable of being used for kernel
534	  data. This typically means ZONE_NORMAL.
535
536config MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE
537	bool "movable"
538	help
539	  Select this if you want the kernel to automatically online
540	  hotplugged memory into ZONE_MOVABLE. This memory will generally
541	  not be utilized for kernel data.
542
543	  This should only be used when the admin knows sufficient
544	  ZONE_NORMAL memory is available to describe hotplug memory,
545	  otherwise hotplug memory may fail to online. For example,
546	  sufficient kernel-capable memory (ZONE_NORMAL) must be
547	  available to allocate page structs to describe ZONE_MOVABLE.
548
549endchoice
550
551config MEMORY_HOTREMOVE
552	bool "Allow for memory hot remove"
553	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
554	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
555	depends on MIGRATION
556
557config MHP_MEMMAP_ON_MEMORY
558	def_bool y
559	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
560	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
561
562endif # MEMORY_HOTPLUG
563
564config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
565	bool
566
567# Heavily threaded applications may benefit from splitting the mm-wide
568# page_table_lock, so that faults on different parts of the user address
569# space can be handled with less contention: split it at this NR_CPUS.
570# Default to 4 for wider testing, though 8 might be more appropriate.
571# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
572# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
573# SPARC32 allocates multiple pte tables within a single page, and therefore
574# a per-page lock leads to problems when multiple tables need to be locked
575# at the same time (e.g. copy_page_range()).
576# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
577#
578config SPLIT_PTE_PTLOCKS
579	def_bool y
580	depends on MMU
581	depends on SMP
582	depends on NR_CPUS >= 4
583	depends on !ARM || CPU_CACHE_VIPT
584	depends on !PARISC || PA20
585	depends on !SPARC32
586
587config ARCH_ENABLE_SPLIT_PMD_PTLOCK
588	bool
589
590config SPLIT_PMD_PTLOCKS
591	def_bool y
592	depends on SPLIT_PTE_PTLOCKS && ARCH_ENABLE_SPLIT_PMD_PTLOCK
593
594#
595# support for memory balloon
596config MEMORY_BALLOON
597	bool
598
599#
600# support for memory balloon compaction
601config BALLOON_COMPACTION
602	bool "Allow for balloon memory compaction/migration"
603	default y
604	depends on COMPACTION && MEMORY_BALLOON
605	help
606	  Memory fragmentation introduced by ballooning might significantly
607	  reduce the number of 2MB contiguous memory blocks that can be
608	  used within a guest, thus imposing performance penalties associated
609	  with the reduced number of transparent huge pages that could be used
610	  by the guest workload. Allowing compaction & migration of memory
611	  pages enlisted as part of memory balloon devices avoids the
612	  aforementioned scenario and helps improve memory defragmentation.
613
614#
615# support for memory compaction
616config COMPACTION
617	bool "Allow for memory compaction"
618	default y
619	select MIGRATION
620	depends on MMU
621	help
622	  Compaction is the only memory management component to form
623	  high order (larger physically contiguous) memory blocks
624	  reliably. The page allocator relies on compaction heavily and
625	  the lack of the feature can lead to unexpected OOM killer
626	  invocations for high order memory requests. You shouldn't
627	  disable this option unless there really is a strong reason for
628	  it; in that case we would be interested to hear about it at
629	  linux-mm@kvack.org.
630
631config COMPACT_UNEVICTABLE_DEFAULT
632	int
633	depends on COMPACTION
634	default 0 if PREEMPT_RT
635	default 1
636
637#
638# support for free page reporting
639config PAGE_REPORTING
640	bool "Free page reporting"
641	help
642	  Free page reporting allows for the incremental acquisition of
643	  free pages from the buddy allocator for the purpose of reporting
644	  those pages to another entity, such as a hypervisor, so that the
645	  memory can be freed within the host for other uses.
646
647#
648# support for page migration
649#
650config MIGRATION
651	bool "Page migration"
652	default y
653	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
654	help
655	  Allows the migration of the physical location of pages of processes
656	  while the virtual addresses are not changed. This is useful in
657	  two situations. The first is on NUMA systems to put pages nearer
658	  to the processors accessing them. The second is when allocating huge
659	  pages as migration can relocate pages to satisfy a huge page
660	  allocation instead of reclaiming.
661
662config DEVICE_MIGRATION
663	def_bool MIGRATION && ZONE_DEVICE
664
665config ARCH_ENABLE_HUGEPAGE_MIGRATION
666	bool
667
668config ARCH_ENABLE_THP_MIGRATION
669	bool
670
671config HUGETLB_PAGE_SIZE_VARIABLE
672	def_bool n
673	help
674	  Allows the pageblock_order value to be dynamic instead of just standard
675	  HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
676	  on a platform.
677
678	  Note that the pageblock_order cannot exceed MAX_PAGE_ORDER and will be
679	  clamped down to MAX_PAGE_ORDER.
680
681config CONTIG_ALLOC
682	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
683
684config PCP_BATCH_SCALE_MAX
685	int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free"
686	default 5
687	range 0 6
688	help
689	  In page allocator, PCP (Per-CPU pageset) is refilled and drained in
690	  batches.  The batch number is scaled automatically to improve page
691	  allocation/free throughput.  But too large a scale factor may hurt
692	  latency.  This option sets the upper limit of scale factor to limit
693	  the maximum latency.
694
695config PHYS_ADDR_T_64BIT
696	def_bool 64BIT
697
698config BOUNCE
699	bool "Enable bounce buffers"
700	default y
701	depends on BLOCK && MMU && HIGHMEM
702	help
703	  Enable bounce buffers for devices that cannot access the full range of
704	  memory available to the CPU. Enabled by default when HIGHMEM is
705	  selected, but you may say n to override this.
706
707config MMU_NOTIFIER
708	bool
709	select INTERVAL_TREE
710
711config KSM
712	bool "Enable KSM for page merging"
713	depends on MMU
714	select XXHASH
715	help
716	  Enable Kernel Samepage Merging: KSM periodically scans those areas
717	  of an application's address space that an app has advised may be
718	  mergeable.  When it finds pages of identical content, it replaces
719	  the many instances with a single page of that content, thus
720	  saving memory until one or another app needs to modify the content.
721	  Recommended for use with KVM, or with other duplicative applications.
722	  See Documentation/mm/ksm.rst for more information: KSM is inactive
723	  until a program has madvised that an area is MADV_MERGEABLE, and
724	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
725
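#
# Illustrative sketch (not from this file): an application opts an area
# into KSM scanning with madvise(MADV_MERGEABLE); actual merging only
# starts once root has written 1 to /sys/kernel/mm/ksm/run, as described
# above. The helper name is purely hypothetical.
#
#	#include <sys/mman.h>
#
#	/* Hint that [addr, addr + len) may contain duplicate, mergeable pages. */
#	static int make_mergeable(void *addr, size_t len)
#	{
#		return madvise(addr, len, MADV_MERGEABLE);
#	}
#
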
726config DEFAULT_MMAP_MIN_ADDR
727	int "Low address space to protect from user allocation"
728	depends on MMU
729	default 4096
730	help
731	  This is the portion of low virtual memory which should be protected
732	  from userspace allocation.  Keeping a user from writing to low pages
733	  can help reduce the impact of kernel NULL pointer bugs.
734
735	  For most arm64, ppc64 and x86 users with lots of address space
736	  a value of 65536 is reasonable and should cause no problems.
737	  On arm and other archs it should not be higher than 32768.
738	  Programs which use vm86 functionality or have some need to map
739	  this low address space will need CAP_SYS_RAWIO, or this protection
740	  must be disabled by setting the value to 0.
741
742	  This value can be changed after boot using the
743	  /proc/sys/vm/mmap_min_addr tunable.
744
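#
# Illustrative sketch (not from this file): with mmap_min_addr set to a
# non-zero value, an unprivileged attempt to map page zero is expected to
# fail with EPERM, which is exactly the protection this option configures.
# The helper is hypothetical and assumes 4 KiB pages.
#
#	#include <sys/mman.h>
#	#include <errno.h>
#
#	static int low_mapping_blocked(void)
#	{
#		void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
#			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
#		if (p == MAP_FAILED)
#			return errno == EPERM;	/* blocked by mmap_min_addr */
#		munmap(p, 4096);
#		return 0;
#	}
#
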
745config ARCH_SUPPORTS_MEMORY_FAILURE
746	bool
747
748config MEMORY_FAILURE
749	depends on MMU
750	depends on ARCH_SUPPORTS_MEMORY_FAILURE
751	bool "Enable recovery from hardware memory errors"
752	select RAS
753	help
754	  Enables code to recover from some memory failures on systems
755	  with MCA recovery. This allows a system to continue running
756	  even when some of its memory has uncorrected errors. This requires
757	  special hardware support and typically ECC memory.
758
759config HWPOISON_INJECT
760	tristate "HWPoison pages injector"
761	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
762	select PROC_PAGE_MONITOR
763
764config NOMMU_INITIAL_TRIM_EXCESS
765	int "Turn on mmap() excess space trimming before booting"
766	depends on !MMU
767	default 1
768	help
769	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
770	  of memory on which to store mappings, but it can only ask the system
771	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
772	  more than it requires.  To deal with this, mmap() is able to trim off
773	  the excess and return it to the allocator.
774
775	  If trimming is enabled, the excess is trimmed off and returned to the
776	  system allocator, which can cause extra fragmentation, particularly
777	  if there are a lot of transient processes.
778
779	  If trimming is disabled, the excess is kept, but not used, which for
780	  long-term mappings means that the space is wasted.
781
782	  Trimming can be dynamically controlled through a sysctl option
783	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
784	  excess pages there must be before trimming should occur, or zero if
785	  no trimming is to occur.
786
787	  This option specifies the initial value of that sysctl.  The default
788	  of 1 says that all excess pages should be trimmed.
789
790	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
791
792config ARCH_WANT_GENERAL_HUGETLB
793	bool
794
795config ARCH_WANTS_THP_SWAP
796	def_bool n
797
798config PERSISTENT_HUGE_ZERO_FOLIO
799	bool "Allocate a PMD sized folio for zeroing"
800	depends on TRANSPARENT_HUGEPAGE
801	help
802	  Enable this option to reduce the runtime refcounting overhead
803	  of the huge zero folio and expand the places in the kernel
804	  that can use huge zero folios. For instance, block I/O benefits
805	  from access to large folios for zeroing memory.
806
807	  With this option enabled, the huge zero folio is allocated
808	  once and never freed. One full huge page's worth of memory shall
809	  be used.
810
811	  Say Y if your system has lots of memory. Say N if you are
812	  memory constrained.
813
814config MM_ID
815	def_bool n
816
817menuconfig TRANSPARENT_HUGEPAGE
818	bool "Transparent Hugepage Support"
819	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
820	select COMPACTION
821	select XARRAY_MULTI
822	select MM_ID
823	help
824	  Transparent Hugepages allows the kernel to use huge pages and
825	  huge TLB entries transparently for applications whenever possible.
826	  This feature can improve computing performance for certain
827	  applications by speeding up page faults during memory
828	  allocation, by reducing the number of TLB misses and by speeding
829	  up page table walking.
830
831	  If memory constrained on an embedded system, you may want to say N.
832
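#
# Illustrative sketch (not from this file): with THP available, an
# application can hint that a large, aligned region should be backed by
# huge pages via madvise(MADV_HUGEPAGE); this is also the case the
# "madvise" sysfs default below is tuned for. The helper and the 2M
# PMD size assumption are purely illustrative.
#
#	#include <stdlib.h>
#	#include <sys/mman.h>
#
#	#define SZ_2M	(2UL * 1024 * 1024)
#
#	static void *alloc_thp_friendly(size_t len)
#	{
#		void *p = NULL;
#
#		/* 2M alignment so the region can be mapped with PMD-sized pages. */
#		if (posix_memalign(&p, SZ_2M, len))
#			return NULL;
#		madvise(p, len, MADV_HUGEPAGE);	/* best effort */
#		return p;
#	}
#
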
833if TRANSPARENT_HUGEPAGE
834
835choice
836	prompt "Transparent Hugepage Support sysfs defaults"
837	depends on TRANSPARENT_HUGEPAGE
838	default TRANSPARENT_HUGEPAGE_ALWAYS
839	help
840	  Selects the sysfs defaults for Transparent Hugepage Support.
841
842	config TRANSPARENT_HUGEPAGE_ALWAYS
843		bool "always"
844	help
845	  Enabling Transparent Hugepage always can increase the
846	  memory footprint of applications without a guaranteed
847	  benefit, but it will work automatically for all applications.
848
849	config TRANSPARENT_HUGEPAGE_MADVISE
850		bool "madvise"
851	help
852	  Enabling Transparent Hugepage madvise will only provide a
853	  performance benefit to the applications using
854	  madvise(MADV_HUGEPAGE), but it won't risk increasing the
855	  memory footprint of applications without a guaranteed
856	  benefit.
857
858	config TRANSPARENT_HUGEPAGE_NEVER
859		bool "never"
860	help
861	  Disable Transparent Hugepage by default. It can still be
862	  enabled at runtime via sysfs.
863endchoice
864
865config THP_SWAP
866	def_bool y
867	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP && 64BIT
868	help
869	  Swap transparent huge pages in one piece, without splitting.
870	  XXX: For now, the swap cluster backing a transparent huge page
871	  will be split after swapout.
872
873	  For selection by architectures with reasonable THP sizes.
874
875config READ_ONLY_THP_FOR_FS
876	bool "Read-only THP for filesystems (EXPERIMENTAL)"
877	depends on TRANSPARENT_HUGEPAGE
878
879	help
880	  Allow khugepaged to put read-only file-backed pages in THP.
881
882	  This is marked experimental because it is a new feature. Write
883	  support of file THPs will be developed in the next few release
884	  cycles.
885
886config NO_PAGE_MAPCOUNT
887	bool "No per-page mapcount (EXPERIMENTAL)"
888	help
889	  Do not maintain per-page mapcounts for pages part of larger
890	  allocations, such as transparent huge pages.
891
892	  When this config option is enabled, some interfaces that relied on
893	  this information will rely on less-precise per-allocation information
894	  instead: for example, using the average per-page mapcount in such
895	  a large allocation instead of the per-page mapcount.
896
897	  EXPERIMENTAL because the impact of some changes is still unclear.
898
899endif # TRANSPARENT_HUGEPAGE
900
901# simple helper to make the code a bit easier to read
902config PAGE_MAPCOUNT
903	def_bool !NO_PAGE_MAPCOUNT
904
905#
906# The architecture supports pgtable leaves that is larger than PAGE_SIZE
907#
908config PGTABLE_HAS_HUGE_LEAVES
909	def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE
910
911# TODO: Allow to be enabled without THP
912config ARCH_SUPPORTS_HUGE_PFNMAP
913	def_bool n
914	depends on TRANSPARENT_HUGEPAGE
915
916config ARCH_SUPPORTS_PMD_PFNMAP
917	def_bool y
918	depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE
919
920config ARCH_SUPPORTS_PUD_PFNMAP
921	def_bool y
922	depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
923
924#
925# Architectures that always use weak definitions for percpu
926# variables in modules should set this.
927#
928config ARCH_MODULE_NEEDS_WEAK_PER_CPU
929	bool
930
931#
932# UP and nommu archs use km based percpu allocator
933#
934config NEED_PER_CPU_KM
935	depends on !SMP || !MMU
936	bool
937	default y
938
939config NEED_PER_CPU_EMBED_FIRST_CHUNK
940	bool
941
942config NEED_PER_CPU_PAGE_FIRST_CHUNK
943	bool
944
945config USE_PERCPU_NUMA_NODE_ID
946	bool
947
948config HAVE_SETUP_PER_CPU_AREA
949	bool
950
951config CMA
952	bool "Contiguous Memory Allocator"
953	depends on MMU
954	select MIGRATION
955	select MEMORY_ISOLATION
956	help
957	  This enables the Contiguous Memory Allocator which allows other
958	  subsystems to allocate big physically-contiguous blocks of memory.
959	  CMA reserves a region of memory and allows only movable pages to
960	  be allocated from it. This way, the kernel can use the memory for
961	  pagecache, and when a subsystem requests a contiguous area, the
962	  allocated pages are migrated away to serve the contiguous request.
963
964	  If unsure, say "n".
965
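#
# Illustrative kernel-side sketch (not from this file): a driver typically
# benefits from CMA indirectly through the DMA API; a large coherent
# allocation such as the hypothetical helper below may be satisfied from a
# CMA area when a buddy allocation of that size would otherwise fail.
#
#	#include <linux/dma-mapping.h>
#
#	static void *alloc_big_dma_buffer(struct device *dev, size_t size,
#					  dma_addr_t *dma)
#	{
#		/* May be backed by CMA for large, contiguous sizes. */
#		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
#	}
#
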
966config CMA_DEBUGFS
967	bool "CMA debugfs interface"
968	depends on CMA && DEBUG_FS
969	help
970	  Turns on the DebugFS interface for CMA.
971
972config CMA_SYSFS
973	bool "CMA information through sysfs interface"
974	depends on CMA && SYSFS
975	help
976	  This option exposes some sysfs attributes to get information
977	  from CMA.
978
979config CMA_AREAS
980	int "Maximum count of the CMA areas"
981	depends on CMA
982	default 20 if NUMA
983	default 8
984	help
985	  CMA allows creating CMA areas for a particular purpose, mainly
986	  used as device-private areas. This parameter sets the maximum
987	  number of CMA areas in the system.
988
989	  If unsure, leave the default value of "8" on UMA and "20" on NUMA.
990
991#
992# Select this config option from the architecture Kconfig, if available, to set
993# the max page order for physically contiguous allocations.
994#
995config ARCH_FORCE_MAX_ORDER
996	int
997
998#
999# When ARCH_FORCE_MAX_ORDER is not defined,
1000# the default page block order is MAX_PAGE_ORDER (10) as per
1001# include/linux/mmzone.h.
1002#
1003config PAGE_BLOCK_MAX_ORDER
1004	int "Page Block Order Upper Limit"
1005	range 1 10 if ARCH_FORCE_MAX_ORDER = 0
1006	default 10 if ARCH_FORCE_MAX_ORDER = 0
1007	range 1 ARCH_FORCE_MAX_ORDER if ARCH_FORCE_MAX_ORDER != 0
1008	default ARCH_FORCE_MAX_ORDER if ARCH_FORCE_MAX_ORDER != 0
1009	help
1010	  The page block order refers to the power of two number of pages that
1011	  are physically contiguous and can have a migrate type associated with
1012	  them. The maximum size of the page block order is at least limited by
1013	  ARCH_FORCE_MAX_ORDER/MAX_PAGE_ORDER.
1014
1015	  This config adds a new upper limit of default page block
1016	  order when the page block order is required to be smaller than
1017	  ARCH_FORCE_MAX_ORDER/MAX_PAGE_ORDER or other limits
1018	  (see include/linux/pageblock-flags.h for details).
1019
1020	  Reducing pageblock order can negatively impact THP generation
1021	  success rate. If your workloads use THP heavily, please use this
1022	  option with caution.
1023
1024	  Don't change if unsure.
1025
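#
# Worked example (illustrative): with 4 KiB base pages, an order-10 page
# block spans 2^10 * 4 KiB = 4 MiB of physically contiguous memory, while
# limiting the order to 9 shrinks the page block to 2 MiB, the usual
# PMD-sized THP on x86-64.
#
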
1026config MEM_SOFT_DIRTY
1027	bool "Track memory changes"
1028	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
1029	select PROC_PAGE_MONITOR
1030	help
1031	  This option enables tracking of memory changes by introducing a
1032	  soft-dirty bit on pte-s. This bit is set when someone writes
1033	  into a page just as the regular dirty bit is, but unlike the latter
1034	  it can be cleared by hand.
1035
1036	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.
1037
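#
# Illustrative sketch (not from this file): a checkpoint/restore style tool
# clears the soft-dirty bits of a task by writing "4" to its clear_refs
# file and later inspects bit 55 of /proc/<pid>/pagemap entries to find
# pages written since then. The helper name is hypothetical.
#
#	#include <stdio.h>
#	#include <sys/types.h>
#
#	static int clear_soft_dirty(pid_t pid)
#	{
#		char path[64];
#		FILE *f;
#
#		snprintf(path, sizeof(path), "/proc/%d/clear_refs", pid);
#		f = fopen(path, "w");
#		if (!f)
#			return -1;
#		fputs("4", f);	/* "4" clears only the soft-dirty bits */
#		return fclose(f);
#	}
#
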
1038config GENERIC_EARLY_IOREMAP
1039	bool
1040
1041config STACK_MAX_DEFAULT_SIZE_MB
1042	int "Default maximum user stack size for 32-bit processes (MB)"
1043	default 100
1044	range 8 2048
1045	depends on STACK_GROWSUP && (!64BIT || COMPAT)
1046	help
1047	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
1048	  user processes when the stack grows upwards (currently only on the
1049	  parisc architecture) and the RLIMIT_STACK hard limit is unlimited.
1050
1051	  A sane initial value is 100 MB.
1052
1053config DEFERRED_STRUCT_PAGE_INIT
1054	bool "Defer initialisation of struct pages to kthreads"
1055	depends on SPARSEMEM
1056	depends on !NEED_PER_CPU_KM
1057	depends on 64BIT
1058	depends on !KMSAN
1059	select PADATA
1060	help
1061	  Ordinarily all struct pages are initialised during early boot in a
1062	  single thread. On very large machines this can take a considerable
1063	  amount of time. If this option is set, large machines will bring up
1064	  a subset of memmap at boot and then initialise the rest in parallel.
1065	  This has a potential performance impact on tasks running early in the
1066	  lifetime of the system until these kthreads finish the
1067	  initialisation.
1068
1069config PAGE_IDLE_FLAG
1070	bool
1071	select PAGE_EXTENSION if !64BIT
1072	help
1073	  This adds PG_idle and PG_young flags to 'struct page'.  PTE Accessed
1074	  bit writers can set the state of the bit in the flags so that PTE
1075	  Accessed bit readers may avoid disturbance.
1076
1077config IDLE_PAGE_TRACKING
1078	bool "Enable idle page tracking"
1079	depends on SYSFS && MMU
1080	select PAGE_IDLE_FLAG
1081	help
1082	  This feature allows estimating the number of user pages that have
1083	  not been touched during a given period of time. This information can
1084	  be useful to tune memory cgroup limits and/or for job placement
1085	  within a compute cluster.
1086
1087	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
1088	  more details.
1089
1090# Architectures which implement cpu_dcache_is_aliasing() to query
1091# whether the data caches are aliased (VIVT or VIPT with dcache
1092# aliasing) need to select this.
1093config ARCH_HAS_CPU_CACHE_ALIASING
1094	bool
1095
1096config ARCH_HAS_CACHE_LINE_SIZE
1097	bool
1098
1099config ARCH_HAS_CURRENT_STACK_POINTER
1100	bool
1101	help
1102	  In support of HARDENED_USERCOPY performing stack variable lifetime
1103	  checking, an architecture-agnostic way to find the stack pointer
1104	  is needed. Once an architecture defines an unsigned long global
1105	  register alias named "current_stack_pointer", this config can be
1106	  selected.
1107
1108config ARCH_HAS_ZONE_DMA_SET
1109	bool
1110
1111config ZONE_DMA
1112	bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
1113	default y if ARM64 || X86
1114
1115config ZONE_DMA32
1116	bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
1117	depends on !X86_32
1118	default y if ARM64
1119
1120config ZONE_DEVICE
1121	bool "Device memory (pmem, HMM, etc...) hotplug support"
1122	depends on MEMORY_HOTPLUG
1123	depends on MEMORY_HOTREMOVE
1124	depends on SPARSEMEM_VMEMMAP
1125	select XARRAY_MULTI
1126
1127	help
1128	  Device memory hotplug support allows for establishing pmem,
1129	  or other device driver discovered memory regions, in the
1130	  memmap. This allows pfn_to_page() lookups of otherwise
1131	  "device-physical" addresses which is needed for using a DAX
1132	  "device-physical" addresses, which is needed for using a DAX
1133
1134	  If FS_DAX is enabled, then say Y.
1135
1136#
1137# Helpers to mirror range of the CPU page tables of a process into device page
1138# tables.
1139#
1140config HMM_MIRROR
1141	bool
1142	depends on MMU
1143
1144config GET_FREE_REGION
1145	bool
1146
1147config DEVICE_PRIVATE
1148	bool "Unaddressable device memory (GPU memory, ...)"
1149	depends on ZONE_DEVICE
1150	select GET_FREE_REGION
1151
1152	help
1153	  Allows creation of struct pages to represent unaddressable device
1154	  memory; i.e., memory that is only accessible from the device (or
1155	  group of devices). You likely also want to select HMM_MIRROR.
1156
1157config VMAP_PFN
1158	bool
1159
1160config ARCH_USES_HIGH_VMA_FLAGS
1161	bool
1162config ARCH_HAS_PKEYS
1163	bool
1164
1165config ARCH_USES_PG_ARCH_2
1166	bool
1167config ARCH_USES_PG_ARCH_3
1168	bool
1169
1170config VM_EVENT_COUNTERS
1171	default y
1172	bool "Enable VM event counters for /proc/vmstat" if EXPERT
1173	help
1174	  VM event counters are needed for event counts to be shown.
1175	  This option allows the disabling of the VM event counters
1176	  on EXPERT systems.  /proc/vmstat will only show page counts
1177	  if VM event counters are disabled.
1178
1179config PERCPU_STATS
1180	bool "Collect percpu memory statistics"
1181	help
1182	  This feature collects and exposes statistics via debugfs. The
1183	  information includes global and per chunk statistics, which can
1184	  be used to help understand percpu memory usage.
1185
1186config GUP_TEST
1187	bool "Enable infrastructure for get_user_pages()-related unit tests"
1188	depends on DEBUG_FS
1189	help
1190	  Provides /sys/kernel/debug/gup_test, which in turn provides a way
1191	  to make ioctl calls that can launch kernel-based unit tests for
1192	  the get_user_pages*() and pin_user_pages*() family of API calls.
1193
1194	  These tests include benchmark testing of the _fast variants of
1195	  get_user_pages*() and pin_user_pages*(), as well as smoke tests of
1196	  the non-_fast variants.
1197
1198	  There is also a sub-test that allows running dump_page() on any
1199	  of up to eight pages (selected by command line args) within the
1200	  range of user-space addresses. These pages are either pinned via
1201	  pin_user_pages*(), or pinned via get_user_pages*(), as specified
1202	  by other command line arguments.
1203
1204	  See tools/testing/selftests/mm/gup_test.c
1205
1206comment "GUP_TEST needs to have DEBUG_FS enabled"
1207	depends on !GUP_TEST && !DEBUG_FS
1208
1209config GUP_GET_PXX_LOW_HIGH
1210	bool
1211
1212config DMAPOOL_TEST
1213	tristate "Enable a module to run time tests on dma_pool"
1214	depends on HAS_DMA
1215	help
1216	  Provides a test module that will allocate and free many blocks of
1217	  various sizes and report how long it takes. This is intended to
1218	  provide a consistent way to measure how changes to the
1219	  dma_pool_alloc/free routines affect performance.
1220
1221config ARCH_HAS_PTE_SPECIAL
1222	bool
1223
1224config MAPPING_DIRTY_HELPERS
1225	bool
1226
1227config KMAP_LOCAL
1228	bool
1229
1230config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
1231	bool
1232
1233config MEMFD_CREATE
1234	bool "Enable memfd_create() system call" if EXPERT
1235
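#
# Illustrative sketch (not from this file): memfd_create() gives an
# anonymous, file-like memory object that can be sized and mapped like a
# tmpfs file; glibc exposes the wrapper in <sys/mman.h> with _GNU_SOURCE.
# The helper is hypothetical and keeps error handling minimal.
#
#	#define _GNU_SOURCE
#	#include <sys/mman.h>
#	#include <unistd.h>
#
#	static void *anon_shared_region(size_t len)
#	{
#		int fd = memfd_create("example-region", MFD_CLOEXEC);
#		void *p;
#
#		if (fd < 0 || ftruncate(fd, len) < 0)
#			return NULL;
#		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
#		return p == MAP_FAILED ? NULL : p;	/* fd lifetime elided */
#	}
#
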
1236config SECRETMEM
1237	default y
1238	bool "Enable memfd_secret() system call" if EXPERT
1239	depends on ARCH_HAS_SET_DIRECT_MAP
1240	help
1241	  Enable the memfd_secret() system call with the ability to create
1242	  memory areas visible only in the context of the owning process and
1243	  not mapped to other processes and other kernel page tables.
1244
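#
# Illustrative sketch (not from this file): there is no glibc wrapper for
# memfd_secret(), so it is usually called through syscall(); the resulting
# mapping is visible only to the owning process, as described above.
# SYS_memfd_secret assumes reasonably new kernel headers, and the helper
# name is hypothetical.
#
#	#include <sys/mman.h>
#	#include <sys/syscall.h>
#	#include <unistd.h>
#
#	static void *alloc_secret(size_t len)
#	{
#		int fd = syscall(SYS_memfd_secret, 0);
#
#		if (fd < 0 || ftruncate(fd, len) < 0)
#			return NULL;
#		return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
#	}
#
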
1245config ANON_VMA_NAME
1246	bool "Anonymous VMA name support"
1247	depends on PROC_FS && ADVISE_SYSCALLS && MMU
1248
1249	help
1250	  Allow naming anonymous virtual memory areas.
1251
1252	  This feature allows assigning names to virtual memory areas. Assigned
1253	  names can be later retrieved from /proc/pid/maps and /proc/pid/smaps
1254	  and help identify individual anonymous memory areas.
1255	  Assigning a name to an anonymous virtual memory area might prevent
1256	  that area from being merged with adjacent virtual memory areas due
1257	  to the difference in their names.
1258
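#
# Illustrative sketch (not from this file): names are attached with
# prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) and then show up as
# [anon:<name>] entries in /proc/<pid>/maps. The guarded constant values
# are taken from the UAPI headers, and the helper name is hypothetical.
#
#	#include <stddef.h>
#	#include <sys/prctl.h>
#
#	#ifndef PR_SET_VMA
#	#define PR_SET_VMA		0x53564d41	/* from linux/prctl.h */
#	#define PR_SET_VMA_ANON_NAME	0
#	#endif
#
#	static int name_region(void *addr, size_t len, const char *name)
#	{
#		return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
#			     (unsigned long)addr, len, name);
#	}
#
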
1259config HAVE_ARCH_USERFAULTFD_WP
1260	bool
1261	help
1262	  Arch has userfaultfd write protection support
1263
1264config HAVE_ARCH_USERFAULTFD_MINOR
1265	bool
1266	help
1267	  Arch has userfaultfd minor fault support
1268
1269menuconfig USERFAULTFD
1270	bool "Enable userfaultfd() system call"
1271	depends on MMU
1272	help
1273	  Enable the userfaultfd() system call that allows intercepting and
1274	  handling page faults in userland.
1275
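#
# Illustrative sketch (not from this file): a monitor creates the file
# descriptor with the userfaultfd() syscall and completes the UFFDIO_API
# handshake before registering memory ranges; fault events are then read
# from the descriptor and resolved with UFFDIO_COPY and friends. The
# helper name is hypothetical.
#
#	#include <fcntl.h>
#	#include <linux/userfaultfd.h>
#	#include <sys/ioctl.h>
#	#include <sys/syscall.h>
#	#include <unistd.h>
#
#	static int open_uffd(void)
#	{
#		struct uffdio_api api = { .api = UFFD_API };
#		int fd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
#
#		if (fd < 0 || ioctl(fd, UFFDIO_API, &api) < 0)
#			return -1;
#		return fd;
#	}
#
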
1276if USERFAULTFD
1277config PTE_MARKER_UFFD_WP
1278	bool "Userfaultfd write protection support for shmem/hugetlbfs"
1279	default y
1280	depends on HAVE_ARCH_USERFAULTFD_WP
1281
1282	help
1283	  Allows creating marker PTEs for userfaultfd write protection
1284	  purposes.  It is required to enable userfaultfd write protection on
1285	  file-backed memory types like shmem and hugetlbfs.
1286endif # USERFAULTFD
1287
1288# multi-gen LRU {
1289config LRU_GEN
1290	bool "Multi-Gen LRU"
1291	depends on MMU
1292	# make sure folio->flags has enough spare bits
1293	depends on 64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP
1294	help
1295	  A high-performance LRU implementation to overcommit memory. See
1296	  Documentation/admin-guide/mm/multigen_lru.rst for details.
1297
1298config LRU_GEN_ENABLED
1299	bool "Enable by default"
1300	depends on LRU_GEN
1301	help
1302	  This option enables the multi-gen LRU by default.
1303
1304config LRU_GEN_STATS
1305	bool "Full stats for debugging"
1306	depends on LRU_GEN
1307	help
1308	  Do not enable this option unless you plan to look at historical stats
1309	  from evicted generations for debugging purposes.
1310
1311	  This option has a per-memcg and per-node memory overhead.
1312
1313config LRU_GEN_WALKS_MMU
1314	def_bool y
1315	depends on LRU_GEN && ARCH_HAS_HW_PTE_YOUNG
1316# }
1317
1318config ARCH_SUPPORTS_PER_VMA_LOCK
1319	def_bool n
1320
1321config PER_VMA_LOCK
1322	def_bool y
1323	depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
1324	help
1325	  Allow per-vma locking during page fault handling.
1326
1327	  This feature allows locking each virtual memory area separately when
1328	  handling page faults instead of taking mmap_lock.
1329
1330config LOCK_MM_AND_FIND_VMA
1331	bool
1332	depends on !STACK_GROWSUP
1333
1334config IOMMU_MM_DATA
1335	bool
1336
1337config EXECMEM
1338	bool
1339
1340config NUMA_MEMBLKS
1341	bool
1342
1343config NUMA_EMU
1344	bool "NUMA emulation"
1345	depends on NUMA_MEMBLKS
1346	depends on X86 || GENERIC_ARCH_NUMA
1347	help
1348	  Enable NUMA emulation. A flat machine will be split
1349	  into virtual nodes when booted with "numa=fake=N", where N is the
1350	  number of nodes. This is only useful for debugging.
1351
1352config ARCH_HAS_USER_SHADOW_STACK
1353	bool
1354	help
1355	  The architecture has hardware support for userspace shadow
1356	  stacks (e.g. x86 CET, arm64 GCS or RISC-V Zicfiss).
1357
1358config ARCH_SUPPORTS_PT_RECLAIM
1359	def_bool n
1360
1361config PT_RECLAIM
1362	bool "Reclaim empty user page table pages"
1363	default y
1364	depends on ARCH_SUPPORTS_PT_RECLAIM && MMU && SMP
1365	select MMU_GATHER_RCU_TABLE_FREE
1366	help
1367	  Try to reclaim empty user page table pages in paths other than munmap
1368	  and the exit_mmap path.
1369
1370	  Note: currently only empty user PTE page table pages are reclaimed.
1371
1372config FIND_NORMAL_PAGE
1373	def_bool n
1374
1375source "mm/damon/Kconfig"
1376
1377endmenu
1378