xref: /linux/mm/Kconfig (revision 7203ca412fc8e8a0588e9adc0f777d3163f8dff3)
1# SPDX-License-Identifier: GPL-2.0-only
2
3menu "Memory Management options"
4
5#
6# For some reason microblaze and nios2 hard code SWAP=n.  Hopefully we can
7# add proper SWAP support to them, in which case this can be removed.
8#
9config ARCH_NO_SWAP
10	bool
11
12menuconfig SWAP
13	bool "Support for paging of anonymous memory (swap)"
14	depends on MMU && BLOCK && !ARCH_NO_SWAP
15	default y
16	help
17	  This option allows you to choose whether you want to have support
18	  for so-called swap devices or swap files in your kernel that are
19	  used to provide more virtual memory than the actual RAM present
20	  in your computer.  If unsure say Y.
21
22config ZSWAP
23	bool "Compressed cache for swap pages"
24	depends on SWAP
25	select CRYPTO
26	select ZSMALLOC
27	help
28	  A lightweight compressed cache for swap pages.  It takes
29	  pages that are in the process of being swapped out and attempts to
30	  compress them into a dynamically allocated RAM-based memory pool.
31	  This can result in a significant I/O reduction on swap device and,
32	  in the case where decompressing from RAM is faster than swap device
33	  reads, can also improve workload performance.
34
35config ZSWAP_DEFAULT_ON
36	bool "Enable the compressed cache for swap pages by default"
37	depends on ZSWAP
38	help
39	  If selected, the compressed cache for swap pages will be enabled
40	  at boot, otherwise it will be disabled.
41
42	  The selection made here can be overridden by using the kernel
43	  command line 'zswap.enabled=' option.
44
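# The ZSWAP_DEFAULT_ON setting above only picks the boot-time default; for
# example, booting with "zswap.enabled=1" (or "=0") overrides it, and on most
# builds it can also be toggled at runtime via
# /sys/module/zswap/parameters/enabled.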
45config ZSWAP_SHRINKER_DEFAULT_ON
46	bool "Shrink the zswap pool on memory pressure"
47	depends on ZSWAP
48	default n
49	help
50	  If selected, the zswap shrinker will be enabled, and the pages
51	  stored in the zswap pool will become available for reclaim (i.e.
52	  written back to the backing swap device) on memory pressure.
53
54	  This means that zswap writeback could happen even if the pool is
55	  not yet full, or the cgroup zswap limit has not been reached,
56	  reducing the chance that cold pages will reside in the zswap pool
57	  and consume memory indefinitely.
58
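# Example: independent of ZSWAP_SHRINKER_DEFAULT_ON, the shrinker is normally
# controlled through the zswap module parameter of the same name, e.g.
# "zswap.shrinker_enabled=Y" on the kernel command line.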
59choice
60	prompt "Default compressor"
61	depends on ZSWAP
62	default ZSWAP_COMPRESSOR_DEFAULT_LZO
63	help
64	  Selects the default compression algorithm for the compressed cache
65	  for swap pages.
66
67	  For an overview of what kind of performance can be expected from
68	  a particular compression algorithm please refer to the benchmarks
69	  available at the following LWN page:
70	  https://lwn.net/Articles/751795/
71
72	  If in doubt, select 'LZO'.
73
74	  The selection made here can be overridden by using the kernel
75	  command line 'zswap.compressor=' option.
76
77config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
78	bool "Deflate"
79	select CRYPTO_DEFLATE
80	help
81	  Use the Deflate algorithm as the default compression algorithm.
82
83config ZSWAP_COMPRESSOR_DEFAULT_LZO
84	bool "LZO"
85	select CRYPTO_LZO
86	help
87	  Use the LZO algorithm as the default compression algorithm.
88
89config ZSWAP_COMPRESSOR_DEFAULT_842
90	bool "842"
91	select CRYPTO_842
92	help
93	  Use the 842 algorithm as the default compression algorithm.
94
95config ZSWAP_COMPRESSOR_DEFAULT_LZ4
96	bool "LZ4"
97	select CRYPTO_LZ4
98	help
99	  Use the LZ4 algorithm as the default compression algorithm.
100
101config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
102	bool "LZ4HC"
103	select CRYPTO_LZ4HC
104	help
105	  Use the LZ4HC algorithm as the default compression algorithm.
106
107config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
108	bool "zstd"
109	select CRYPTO_ZSTD
110	help
111	  Use the zstd algorithm as the default compression algorithm.
112endchoice
113
114config ZSWAP_COMPRESSOR_DEFAULT
115       string
116       depends on ZSWAP
117       default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
118       default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
119       default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
120       default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
121       default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
122       default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
123       default ""
124
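# Example: the compile-time default above can be overridden without
# reconfiguring the kernel, e.g. by booting with "zswap.compressor=zstd".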
125config ZSMALLOC
126	tristate
127
128if ZSMALLOC
129
130menu "Zsmalloc allocator options"
131	depends on ZSMALLOC
132
133comment "Zsmalloc is a common backend allocator for zswap & zram"
134
135config ZSMALLOC_STAT
136	bool "Export zsmalloc statistics"
137	select DEBUG_FS
138	help
139	  This option enables code in zsmalloc to collect various
140	  statistics about what's happening in zsmalloc and exports that
141	  information to userspace via debugfs.
142	  If unsure, say N.
143
144config ZSMALLOC_CHAIN_SIZE
145	int "Maximum number of physical pages per-zspage"
146	default 8
147	range 4 16
148	help
149	  This option sets the upper limit on the number of physical pages
150	  that a zsmalloc page (zspage) can consist of. The optimal zspage
151	  chain size is calculated for each size class during the
152	  initialization of the pool.
153
154	  Changing this option can alter the characteristics of size classes,
155	  such as the number of pages per zspage and the number of objects
156	  per zspage. This can also result in different configurations of
157	  the pool, as zsmalloc merges size classes with similar
158	  characteristics.
159
160	  For more information, see zsmalloc documentation.
161
162endmenu
163
164endif
165
166menu "Slab allocator options"
167
168config SLUB
169	def_bool y
170	select IRQ_WORK
171
172config KVFREE_RCU_BATCHED
173	def_bool y
174	depends on !SLUB_TINY && !TINY_RCU
175
176config SLUB_TINY
177	bool "Configure for minimal memory footprint"
178	depends on EXPERT && !COMPILE_TEST
179	select SLAB_MERGE_DEFAULT
180	help
181	  Configures the slab allocator to achieve a minimal memory
182	  footprint, sacrificing scalability, debugging and other features.
183	  This is intended only for the smallest systems that previously used
184	  the SLOB allocator and is not recommended for systems with more than
185	  16MB RAM.
186
187	   If unsure, say N.
188
189config SLAB_MERGE_DEFAULT
190	bool "Allow slab caches to be merged"
191	default y
192	help
193	  For reduced kernel memory fragmentation, slab caches can be
194	  merged when they share the same size and other characteristics.
195	  This carries a risk of kernel heap overflows being able to
196	  overwrite objects from merged caches (and more easily control
197	  cache layout), which makes such heap attacks easier to exploit
198	  by attackers. By keeping caches unmerged, these kinds of exploits
199	  can usually only damage objects in the same cache. To disable
200	  merging at runtime, "slab_nomerge" can be passed on the kernel
201	  command line.
202
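# Example: even with SLAB_MERGE_DEFAULT=y, merging can be disabled for a
# single boot by adding "slab_nomerge" to the kernel command line.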
203config SLAB_FREELIST_RANDOM
204	bool "Randomize slab freelist"
205	depends on !SLUB_TINY
206	help
207	  Randomizes the freelist order used when creating new pages. This
208	  security feature reduces the predictability of the kernel slab
209	  allocator against heap overflows.
210
211config SLAB_FREELIST_HARDENED
212	bool "Harden slab freelist metadata"
213	depends on !SLUB_TINY
214	help
215	  Many kernel heap attacks try to target slab cache metadata and
216	  other infrastructure. This option makes minor performance
217	  sacrifices to harden the kernel slab allocator against common
218	  freelist exploit methods.
219
220config SLAB_BUCKETS
221	bool "Support allocation from separate kmalloc buckets"
222	depends on !SLUB_TINY
223	default SLAB_FREELIST_HARDENED
224	help
225	  Kernel heap attacks frequently depend on being able to create
226	  specifically-sized allocations with user-controlled contents
227	  that will be allocated into the same kmalloc bucket as a
228	  target object. To avoid sharing these allocation buckets,
229	  provide an explicitly separated set of buckets to be used for
230	  user-controlled allocations. This may very slightly increase
231	  memory fragmentation, though in practice it's only a handful
232	  of extra pages since the bulk of user-controlled allocations
233	  are relatively long-lived.
234
235	  If unsure, say Y.
236
237config SLUB_STATS
238	default n
239	bool "Enable performance statistics"
240	depends on SYSFS && !SLUB_TINY
241	help
242	  The statistics are useful to debug slab allocation behavior in
243	  order to find ways to optimize the allocator. This should never be
244	  enabled for production use since keeping statistics slows down
245	  the allocator by a few percentage points. The slabinfo command
246	  supports the determination of the most active slabs to figure
247	  out which slabs are relevant to a particular load.
248	  Try running: slabinfo -DA
249
250config SLUB_CPU_PARTIAL
251	default y
252	depends on SMP && !SLUB_TINY
253	bool "Enable per cpu partial caches"
254	help
255	  Per-CPU partial caches accelerate object allocation and freeing
256	  that is local to a processor, at the price of more indeterminism
257	  in free latency. On overflow, these caches will be cleared,
258	  which requires taking locks that may cause latency spikes.
259	  Typically one would say N for a realtime system.
260
261config RANDOM_KMALLOC_CACHES
262	default n
263	depends on !SLUB_TINY
264	bool "Randomize slab caches for normal kmalloc"
265	help
266	  A hardening feature that creates multiple copies of slab caches for
267	  normal kmalloc allocation and makes kmalloc randomly pick one based
268	  on code address, which makes it more difficult for attackers to spray
269	  vulnerable memory objects on the heap for the purpose of exploiting
270	  memory vulnerabilities.
271
272	  Currently the number of copies is set to 16, a reasonably large value
273	  that effectively diverges the memory objects allocated for different
274	  subsystems or modules into different caches, at the expense of a
275	  limited degree of memory and CPU overhead that relates to hardware and
276	  system workload.
277
278endmenu # Slab allocator options
279
280config SHUFFLE_PAGE_ALLOCATOR
281	bool "Page allocator randomization"
282	default SLAB_FREELIST_RANDOM && ACPI_NUMA
283	help
284	  Randomization of the page allocator improves the average
285	  utilization of a direct-mapped memory-side-cache. See section
286	  5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
287	  6.2a specification for an example of how a platform advertises
288	  the presence of a memory-side-cache. There are also incidental
289	  security benefits as it reduces the predictability of page
290	  allocations to complement SLAB_FREELIST_RANDOM, but the
291	  default granularity of shuffling at MAX_PAGE_ORDER, i.e., 10th
292	  order of pages is selected based on cache utilization benefits
293	  on x86.
294
295	  While the randomization improves cache utilization it may
296	  negatively impact workloads on platforms without a cache. For
297	  this reason, by default, the randomization is not enabled even
298	  if SHUFFLE_PAGE_ALLOCATOR=y. The randomization may be force enabled
299	  with the 'page_alloc.shuffle' kernel command line parameter.
300
301	  Say Y if unsure.
302
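# Example: with SHUFFLE_PAGE_ALLOCATOR=y, shuffling still stays off unless a
# memory-side-cache is detected; booting with "page_alloc.shuffle=1" force
# enables it.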
303config COMPAT_BRK
304	bool "Disable heap randomization"
305	default y
306	help
307	  Randomizing heap placement makes heap exploits harder, but it
308	  also breaks ancient binaries (including anything libc5 based).
309	  This option changes the bootup default to heap randomization
310	  disabled, and can be overridden at runtime by setting
311	  /proc/sys/kernel/randomize_va_space to 2.
312
313	  On non-ancient distros (post-2000 ones) N is usually a safe choice.
314
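# Example: COMPAT_BRK only changes the boot-time default; running
# "echo 2 > /proc/sys/kernel/randomize_va_space" (or
# "sysctl kernel.randomize_va_space=2") re-enables full randomization at runtime.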
315config MMAP_ALLOW_UNINITIALIZED
316	bool "Allow mmapped anonymous memory to be uninitialized"
317	depends on EXPERT && !MMU
318	default n
319	help
320	  Normally, and according to the Linux spec, anonymous memory obtained
321	  from mmap() has its contents cleared before it is passed to
322	  userspace.  Enabling this config option allows you to request that
323	  mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus
324	  providing a huge performance boost.  If this option is not enabled,
325	  then the flag will be ignored.
326
327	  This is taken advantage of by uClibc's malloc(), and also by
328	  ELF-FDPIC binfmt's brk and stack allocator.
329
330	  Because of the obvious security issues, this option should only be
331	  enabled on embedded devices where you control what is run in
332	  userspace.  Since that isn't generally a problem on no-MMU systems,
333	  it is normally safe to say Y here.
334
335	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
336
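# Sketch of the opt-in described above (userspace, assuming the uapi headers
# define MAP_UNINITIALIZED):
#   mmap(NULL, len, PROT_READ | PROT_WRITE,
#        MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
# When MMAP_ALLOW_UNINITIALIZED is disabled, the flag is simply ignored.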
337config SELECT_MEMORY_MODEL
338	def_bool y
339	depends on ARCH_SELECT_MEMORY_MODEL
340
341choice
342	prompt "Memory model"
343	depends on SELECT_MEMORY_MODEL
344	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
345	default FLATMEM_MANUAL
346	help
347	  This option allows you to change some of the ways that
348	  Linux manages its memory internally. Most users will
349	  only have one option here selected by the architecture
350	  configuration. This is normal.
351
352config FLATMEM_MANUAL
353	bool "Flat Memory"
354	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
355	help
356	  This option is best suited for non-NUMA systems with
357	  flat address space. The FLATMEM is the most efficient
358	  system in terms of performance and resource consumption
359	  and it is the best option for smaller systems.
360
361	  For systems that have holes in their physical address
362	  spaces and for features like NUMA and memory hotplug,
363	  choose "Sparse Memory".
364
365	  If unsure, choose this option (Flat Memory) over any other.
366
367config SPARSEMEM_MANUAL
368	bool "Sparse Memory"
369	depends on ARCH_SPARSEMEM_ENABLE
370	help
371	  This will be the only option for some systems, including
372	  memory hot-plug systems.  This is normal.
373
374	  This option provides efficient support for systems with
375	  holes in their physical address space and allows memory
376	  hot-plug and hot-remove.
377
378	  If unsure, choose "Flat Memory" over this option.
379
380endchoice
381
382config SPARSEMEM
383	def_bool y
384	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
385
386config FLATMEM
387	def_bool y
388	depends on !SPARSEMEM || FLATMEM_MANUAL
389
390#
391# SPARSEMEM_EXTREME (which is the default) does some bootmem
392# allocations when sparse_init() is called.  If this cannot
393# be done on your architecture, select this option.  However,
394# statically allocating the mem_section[] array can potentially
395# consume vast quantities of .bss, so be careful.
396#
397# This option will also potentially produce smaller runtime code
398# with gcc 3.4 and later.
399#
400config SPARSEMEM_STATIC
401	bool
402
403#
404# Architecture platforms which require a two level mem_section in SPARSEMEM
405# must select this option. This is usually for architecture platforms with
406# an extremely sparse physical address space.
407#
408config SPARSEMEM_EXTREME
409	def_bool y
410	depends on SPARSEMEM && !SPARSEMEM_STATIC
411
412config SPARSEMEM_VMEMMAP_ENABLE
413	bool
414
415config SPARSEMEM_VMEMMAP
416	def_bool y
417	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
418	help
419	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
420	  pfn_to_page and page_to_pfn operations.  This is the most
421	  efficient option when sufficient kernel resources are available.
422
423config SPARSEMEM_VMEMMAP_PREINIT
424	bool
425#
426# Select this config option from the architecture Kconfig, if it is preferred
427# to enable the feature of HugeTLB/dev_dax vmemmap optimization.
428#
429config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
430	bool
431
432config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
433	bool
434
435config ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
436	bool
437
438config HAVE_MEMBLOCK_PHYS_MAP
439	bool
440
441config HAVE_GUP_FAST
442	depends on MMU
443	bool
444
445# Enable memblock support for scratch memory which is needed for kexec handover
446config MEMBLOCK_KHO_SCRATCH
447	bool
448
449# Don't discard allocated memory used to track "memory" and "reserved" memblocks
450# after early boot, so it can still be used to test for validity of memory.
451# Also, memblocks are updated with memory hot(un)plug.
452config ARCH_KEEP_MEMBLOCK
453	bool
454
455# Keep arch NUMA mapping infrastructure post-init.
456config NUMA_KEEP_MEMINFO
457	bool
458
459config MEMORY_ISOLATION
460	bool
461
462# IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
463# IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
464# /dev/mem.
465config EXCLUSIVE_SYSTEM_RAM
466	def_bool y
467	depends on !DEVMEM || STRICT_DEVMEM
468
469#
470# Only set this on architectures that have completely implemented the memory
471# hotplug feature. If you are not sure, don't touch it.
472#
473config HAVE_BOOTMEM_INFO_NODE
474	def_bool n
475
476config ARCH_ENABLE_MEMORY_HOTPLUG
477	bool
478
479config ARCH_ENABLE_MEMORY_HOTREMOVE
480	bool
481
482# eventually, we can have this option just 'select SPARSEMEM'
483menuconfig MEMORY_HOTPLUG
484	bool "Memory hotplug"
485	select MEMORY_ISOLATION
486	depends on SPARSEMEM
487	depends on ARCH_ENABLE_MEMORY_HOTPLUG
488	depends on 64BIT
489	select NUMA_KEEP_MEMINFO if NUMA
490
491if MEMORY_HOTPLUG
492
493choice
494	prompt "Memory Hotplug Default Online Type"
495	default MHP_DEFAULT_ONLINE_TYPE_OFFLINE
496	help
497	  Default memory type for hotplugged memory.
498
499	  This option sets the default memory hotplug onlining policy
500	  (/sys/devices/system/memory/auto_online_blocks), which determines
501	  what happens to newly added memory regions. The policy can always
502	  be changed at runtime.
503
504	  The default is 'offline'.
505
506	  Select offline to defer onlining to drivers and user policy.
507	  Select auto to let the kernel choose what zones to utilize.
508	  Select online_kernel to generally allow kernel usage of this memory.
509	  Select online_movable to generally disallow kernel usage of this memory.
510
511	  Example kernel usage would be page structs and page tables.
512
513	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
514
515config MHP_DEFAULT_ONLINE_TYPE_OFFLINE
516	bool "offline"
517	help
518	  Hotplugged memory will not be onlined by default.
519	  Choose this for systems with drivers and user policy that
520	  handle onlining of hotplugged memory.
521
522config MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO
523	bool "auto"
524	help
525	  Select this if you want the kernel to automatically online
526	  hotplugged memory into the zone it thinks is reasonable.
527	  This memory may be utilized for kernel data.
528
529config MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL
530	bool "kernel"
531	help
532	  Select this if you want the kernel to automatically online
533	  hotplugged memory into a zone capable of being used for kernel
534	  data. This typically means ZONE_NORMAL.
535
536config MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE
537	bool "movable"
538	help
539	  Select this if you want the kernel to automatically online
540	  hotplug memory into ZONE_MOVABLE. This memory will generally
541	  not be utilized for kernel data.
542
543	  This should only be used when the admin knows sufficient
544	  ZONE_NORMAL memory is available to describe hotplug memory,
545	  otherwise hotplug memory may fail to online. For example,
546	  sufficient kernel-capable memory (ZONE_NORMAL) must be
547	  available to allocate page structs to describe ZONE_MOVABLE.
548
549endchoice
550
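# Example: the default selected above is exported as
# /sys/devices/system/memory/auto_online_blocks and can be changed at runtime,
# e.g. "echo online_movable > /sys/devices/system/memory/auto_online_blocks".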
551config MEMORY_HOTREMOVE
552	bool "Allow for memory hot remove"
553	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
554	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
555	depends on MIGRATION
556
557config MHP_MEMMAP_ON_MEMORY
558	def_bool y
559	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
560	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
561
562endif # MEMORY_HOTPLUG
563
564config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
565       bool
566
567# Heavily threaded applications may benefit from splitting the mm-wide
568# page_table_lock, so that faults on different parts of the user address
569# space can be handled with less contention: split it at this NR_CPUS.
570# Default to 4 for wider testing, though 8 might be more appropriate.
571# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
572# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
573# SPARC32 allocates multiple pte tables within a single page, and therefore
574# a per-page lock leads to problems when multiple tables need to be locked
575# at the same time (e.g. copy_page_range()).
576# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
577#
578config SPLIT_PTE_PTLOCKS
579	def_bool y
580	depends on MMU
581	depends on SMP
582	depends on NR_CPUS >= 4
583	depends on !ARM || CPU_CACHE_VIPT
584	depends on !PARISC || PA20
585	depends on !SPARC32
586
587config ARCH_ENABLE_SPLIT_PMD_PTLOCK
588	bool
589
590config SPLIT_PMD_PTLOCKS
591	def_bool y
592	depends on SPLIT_PTE_PTLOCKS && ARCH_ENABLE_SPLIT_PMD_PTLOCK
593
594#
595# support for memory balloon
596config MEMORY_BALLOON
597	bool
598
599#
600# support for memory balloon compaction
601config BALLOON_COMPACTION
602	bool "Allow for balloon memory compaction/migration"
603	default y
604	depends on COMPACTION && MEMORY_BALLOON
605	help
606	  Memory fragmentation introduced by ballooning might significantly
607	  reduce the number of 2MB contiguous memory blocks that can be
608	  used within a guest, thus imposing performance penalties associated
609	  with the reduced number of transparent huge pages that could be used
610	  by the guest workload. Allowing compaction & migration of memory
611	  pages enlisted as being part of memory balloon devices avoids the
612	  aforementioned scenario and helps improve memory defragmentation.
613
614#
615# support for memory compaction
616config COMPACTION
617	bool "Allow for memory compaction"
618	default y
619	select MIGRATION
620	depends on MMU
621	help
622	  Compaction is the only memory management component to form
623	  high order (larger physically contiguous) memory blocks
624	  reliably. The page allocator relies on compaction heavily and
625	  the lack of the feature can lead to unexpected OOM killer
626	  invocations for high order memory requests. You shouldn't
627	  disable this option unless there really is a strong reason for
628	  it and then we would be really interested to hear about that at
629	  linux-mm@kvack.org.
630
631config COMPACT_UNEVICTABLE_DEFAULT
632	int
633	depends on COMPACTION
634	default 0 if PREEMPT_RT
635	default 1
636
637#
638# support for free page reporting
639config PAGE_REPORTING
640	bool "Free page reporting"
641	help
642	  Free page reporting allows for the incremental acquisition of
643	  free pages from the buddy allocator for the purpose of reporting
644	  those pages to another entity, such as a hypervisor, so that the
645	  memory can be freed within the host for other uses.
646
647#
648# support for page migration
649#
650config MIGRATION
651	bool "Page migration"
652	default y
653	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
654	help
655	  Allows the migration of the physical location of pages of processes
656	  while the virtual addresses are not changed. This is useful in
657	  two situations. The first is on NUMA systems to put pages nearer
658	  to the processors accessing them. The second is when allocating huge
659	  pages as migration can relocate pages to satisfy a huge page
660	  allocation instead of reclaiming.
661
662config DEVICE_MIGRATION
663	def_bool MIGRATION && ZONE_DEVICE
664
665config ARCH_ENABLE_HUGEPAGE_MIGRATION
666	bool
667
668config ARCH_ENABLE_THP_MIGRATION
669	bool
670
671config HUGETLB_PAGE_SIZE_VARIABLE
672	def_bool n
673	help
674	  Allows the pageblock_order value to be dynamic instead of just standard
675	  HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
676	  on a platform.
677
678	  Note that the pageblock_order cannot exceed MAX_PAGE_ORDER and will be
679	  clamped down to MAX_PAGE_ORDER.
680
681config CONTIG_ALLOC
682	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
683
684config PCP_BATCH_SCALE_MAX
685	int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free"
686	default 5
687	range 0 6
688	help
689	  In the page allocator, the PCP (Per-CPU pageset) is refilled and
690	  drained in batches.  The batch number is scaled automatically to
691	  improve page allocation/free throughput.  But a scale factor that is
692	  too large may hurt latency.  This option sets the upper limit of the
693	  scale factor to limit the maximum latency.
694
695config PHYS_ADDR_T_64BIT
696	def_bool 64BIT
697
698config MMU_NOTIFIER
699	bool
700	select INTERVAL_TREE
701
702config KSM
703	bool "Enable KSM for page merging"
704	depends on MMU
705	select XXHASH
706	help
707	  Enable Kernel Samepage Merging: KSM periodically scans those areas
708	  of an application's address space that an app has advised may be
709	  mergeable.  When it finds pages of identical content, it replaces
710	  the many instances with a single page of that content, thus
711	  saving memory until one or another app needs to modify the content.
712	  Recommended for use with KVM, or with other duplicative applications.
713	  See Documentation/mm/ksm.rst for more information: KSM is inactive
714	  until a program has madvised that an area is MADV_MERGEABLE, and
715	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
716
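# Sketch of the KSM opt-in described above: an application calls
#   madvise(addr, length, MADV_MERGEABLE);
# and scanning starts once root runs "echo 1 > /sys/kernel/mm/ksm/run".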
717config DEFAULT_MMAP_MIN_ADDR
718	int "Low address space to protect from user allocation"
719	depends on MMU
720	default 4096
721	help
722	  This is the portion of low virtual memory which should be protected
723	  from userspace allocation.  Keeping a user from writing to low pages
724	  can help reduce the impact of kernel NULL pointer bugs.
725
726	  For most arm64, ppc64 and x86 users with lots of address space
727	  a value of 65536 is reasonable and should cause no problems.
728	  On arm and other archs it should not be higher than 32768.
729	  Programs which use vm86 functionality or have some need to map
730	  this low address space will need CAP_SYS_RAWIO or disable this
731	  protection by setting the value to 0.
732
733	  This value can be changed after boot using the
734	  /proc/sys/vm/mmap_min_addr tunable.
735
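# Example: DEFAULT_MMAP_MIN_ADDR is only the boot default; it can be raised
# later, e.g. with "sysctl -w vm.mmap_min_addr=65536" (the
# /proc/sys/vm/mmap_min_addr tunable mentioned above).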
736config ARCH_SUPPORTS_MEMORY_FAILURE
737	bool
738
739config MEMORY_FAILURE
740	depends on MMU
741	depends on ARCH_SUPPORTS_MEMORY_FAILURE
742	bool "Enable recovery from hardware memory errors"
743	select INTERVAL_TREE
744	help
745	  Enables code to recover from some memory failures on systems
746	  with MCA recovery. This allows a system to continue running
747	  even when some of its memory has uncorrected errors. This requires
748	  special hardware support and typically ECC memory.
749
750config HWPOISON_INJECT
751	tristate "HWPoison pages injector"
752	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
753	select PROC_PAGE_MONITOR
754
755config NOMMU_INITIAL_TRIM_EXCESS
756	int "Turn on mmap() excess space trimming before booting"
757	depends on !MMU
758	default 1
759	help
760	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
761	  of memory on which to store mappings, but it can only ask the system
762	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
763	  more than it requires.  To deal with this, mmap() is able to trim off
764	  the excess and return it to the allocator.
765
766	  If trimming is enabled, the excess is trimmed off and returned to the
767	  system allocator, which can cause extra fragmentation, particularly
768	  if there are a lot of transient processes.
769
770	  If trimming is disabled, the excess is kept, but not used, which for
771	  long-term mappings means that the space is wasted.
772
773	  Trimming can be dynamically controlled through a sysctl option
774	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
775	  excess pages there must be before trimming should occur, or zero if
776	  no trimming is to occur.
777
778	  This option specifies the initial value of this sysctl.  The default
779	  of 1 says that all excess pages should be trimmed.
780
781	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
782
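# Example: NOMMU_INITIAL_TRIM_EXCESS seeds /proc/sys/vm/nr_trim_pages, so
# "echo 0 > /proc/sys/vm/nr_trim_pages" disables trimming again at runtime.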
783config ARCH_WANT_GENERAL_HUGETLB
784	bool
785
786config ARCH_WANTS_THP_SWAP
787	def_bool n
788
789config PERSISTENT_HUGE_ZERO_FOLIO
790	bool "Allocate a PMD sized folio for zeroing"
791	depends on TRANSPARENT_HUGEPAGE
792	help
793	  Enable this option to reduce the runtime refcounting overhead
794	  of the huge zero folio and expand the places in the kernel
795	  that can use huge zero folios. For instance, block I/O benefits
796	  from access to large folios for zeroing memory.
797
798	  With this option enabled, the huge zero folio is allocated
799	  once and never freed. One full huge page's worth of memory shall
800	  be used.
801
802	  Say Y if your system has lots of memory. Say N if you are
803	  memory constrained.
804
805config MM_ID
806	def_bool n
807
808menuconfig TRANSPARENT_HUGEPAGE
809	bool "Transparent Hugepage Support"
810	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
811	select COMPACTION
812	select XARRAY_MULTI
813	select MM_ID
814	help
815	  Transparent Hugepages allows the kernel to use huge pages and
816	  huge TLB entries transparently for applications whenever possible.
817	  This feature can improve computing performance of certain
818	  applications by speeding up page faults during memory
819	  allocation, by reducing the number of TLB misses and by speeding
820	  up page table walks.
821
822	  If you are memory constrained on an embedded system, you may want to say N.
823
824if TRANSPARENT_HUGEPAGE
825
826choice
827	prompt "Transparent Hugepage Support sysfs defaults"
828	depends on TRANSPARENT_HUGEPAGE
829	default TRANSPARENT_HUGEPAGE_ALWAYS
830	help
831	  Selects the sysfs defaults for Transparent Hugepage Support.
832
833	config TRANSPARENT_HUGEPAGE_ALWAYS
834		bool "always"
835	help
836	  Enabling Transparent Hugepage always can increase the
837	  memory footprint of applications without a guaranteed
838	  benefit but it will work automatically for all applications.
839
840	config TRANSPARENT_HUGEPAGE_MADVISE
841		bool "madvise"
842	help
843	  Enabling Transparent Hugepage madvise will only provide a
844	  performance improvement benefit to the applications using
845	  madvise(MADV_HUGEPAGE), but it won't risk increasing the
846	  memory footprint of applications without a guaranteed
847	  benefit.
848
849	config TRANSPARENT_HUGEPAGE_NEVER
850		bool "never"
851	help
852	  Disable Transparent Hugepage by default. It can still be
853	  enabled at runtime via sysfs.
854endchoice
855
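# Example: the sysfs default chosen above corresponds to the runtime knob
# /sys/kernel/mm/transparent_hugepage/enabled, e.g.
# "echo madvise > /sys/kernel/mm/transparent_hugepage/enabled".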
856choice
857	prompt "Shmem hugepage allocation defaults"
858	depends on TRANSPARENT_HUGEPAGE
859	default TRANSPARENT_HUGEPAGE_SHMEM_HUGE_NEVER
860	help
861	  Selects the hugepage allocation policy defaults for
862	  the internal shmem mount.
863
864	  The selection made here can be overridden by using the kernel
865	  command line 'transparent_hugepage_shmem=' option.
866
867	config TRANSPARENT_HUGEPAGE_SHMEM_HUGE_NEVER
868		bool "never"
869	help
870	  Disable hugepage allocation for shmem mount by default. It can
871	  still be enabled with the kernel command line
872	  'transparent_hugepage_shmem=' option or at runtime via sysfs
873	  knob. Note that madvise(MADV_COLLAPSE) can still cause
874	  transparent huge pages to be obtained even if this mode is
875	  specified.
876
877	config TRANSPARENT_HUGEPAGE_SHMEM_HUGE_ALWAYS
878		bool "always"
879	help
880	  Always attempt to allocate hugepages for the shmem mount. This can
881	  increase the memory footprint of applications without a
882	  guaranteed benefit, but it will work automatically for all
883	  applications.
884
885	config TRANSPARENT_HUGEPAGE_SHMEM_HUGE_WITHIN_SIZE
886		bool "within_size"
887	help
888	  Enable hugepage allocation for shmem mount if the allocation
889	  will be fully within the i_size. This configuration also takes
890	  into account any madvise(MADV_HUGEPAGE) hints that may be
891	  provided by the applications.
892
893	config TRANSPARENT_HUGEPAGE_SHMEM_HUGE_ADVISE
894		bool "advise"
895	help
896	  Enable hugepage allocation for the shmem mount exclusively when
897	  applications supply the madvise(MADV_HUGEPAGE) hint.
898	  This ensures that hugepages are used only in response to explicit
899	  requests from applications.
900endchoice
901
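# Example: the shmem default above can be overridden at boot, e.g. with
# "transparent_hugepage_shmem=within_size" on the kernel command line.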
902choice
903	prompt "Tmpfs hugepage allocation defaults"
904	depends on TRANSPARENT_HUGEPAGE
905	default TRANSPARENT_HUGEPAGE_TMPFS_HUGE_NEVER
906	help
907	  Selects the hugepage allocation policy defaults for
908	  the tmpfs mount.
909
910	  The selection made here can be overridden by using the kernel
911	  command line 'transparent_hugepage_tmpfs=' option.
912
913	config TRANSPARENT_HUGEPAGE_TMPFS_HUGE_NEVER
914		bool "never"
915	help
916	  Disable hugepage allocation for tmpfs mount by default. It can
917	  still be enabled with the kernel command line
918	  'transparent_hugepage_tmpfs=' option. Note that
919	  madvise(MADV_COLLAPSE) can still cause transparent huge pages
920	  to be obtained even if this mode is specified.
921
922	config TRANSPARENT_HUGEPAGE_TMPFS_HUGE_ALWAYS
923		bool "always"
924	help
925	  Always attempt to allocate hugepages for the tmpfs mount. This can
926	  increase the memory footprint of applications without a
927	  guaranteed benefit, but it will work automatically for all
928	  applications.
929
930	config TRANSPARENT_HUGEPAGE_TMPFS_HUGE_WITHIN_SIZE
931		bool "within_size"
932	help
933	  Enable hugepage allocation for tmpfs mount if the allocation
934	  will be fully within the i_size. This configuration also takes
935	  into account any madvise(MADV_HUGEPAGE) hints that may be
936	  provided by the applications.
937
938	config TRANSPARENT_HUGEPAGE_TMPFS_HUGE_ADVISE
939		bool "advise"
940	help
941	  Enable hugepage allocation for the tmpfs mount exclusively when
942	  applications supply the madvise(MADV_HUGEPAGE) hint.
943	  This ensures that hugepages are used only in response to explicit
944	  requests from applications.
945endchoice
946
947config THP_SWAP
948	def_bool y
949	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP && 64BIT
950	help
951	  Swap transparent huge pages in one piece, without splitting.
952	  XXX: For now, swap cluster backing transparent huge page
953	  will be split after swapout.
954
955	  For selection by architectures with reasonable THP sizes.
956
957config READ_ONLY_THP_FOR_FS
958	bool "Read-only THP for filesystems (EXPERIMENTAL)"
959	depends on TRANSPARENT_HUGEPAGE
960
961	help
962	  Allow khugepaged to put read-only file-backed pages in THP.
963
964	  This is marked experimental because it is a new feature. Write
965	  support of file THPs will be developed in the next few release
966	  cycles.
967
968config NO_PAGE_MAPCOUNT
969	bool "No per-page mapcount (EXPERIMENTAL)"
970	help
971	  Do not maintain per-page mapcounts for pages that are part of larger
972	  allocations, such as transparent huge pages.
973
974	  When this config option is enabled, some interfaces that relied on
975	  this information will rely on less-precise per-allocation information
976	  instead: for example, using the average per-page mapcount in such
977	  a large allocation instead of the per-page mapcount.
978
979	  EXPERIMENTAL because the impact of some changes is still unclear.
980
981endif # TRANSPARENT_HUGEPAGE
982
983# simple helper to make the code a bit easier to read
984config PAGE_MAPCOUNT
985	def_bool !NO_PAGE_MAPCOUNT
986
987#
988# The architecture supports pgtable leaves that are larger than PAGE_SIZE
989#
990config PGTABLE_HAS_HUGE_LEAVES
991	def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE
992
993#
994# We can end up creating gigantic folios.
995#
996config HAVE_GIGANTIC_FOLIOS
997	def_bool (HUGETLB_PAGE && ARCH_HAS_GIGANTIC_PAGE) || \
998		 (ZONE_DEVICE && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
999
1000config ASYNC_KERNEL_PGTABLE_FREE
1001	def_bool n
1002
1003# TODO: Allow to be enabled without THP
1004config ARCH_SUPPORTS_HUGE_PFNMAP
1005	def_bool n
1006	depends on TRANSPARENT_HUGEPAGE
1007
1008config ARCH_SUPPORTS_PMD_PFNMAP
1009	def_bool y
1010	depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE
1011
1012config ARCH_SUPPORTS_PUD_PFNMAP
1013	def_bool y
1014	depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1015
1016#
1017# Architectures that always use weak definitions for percpu
1018# variables in modules should set this.
1019#
1020config ARCH_MODULE_NEEDS_WEAK_PER_CPU
1021       bool
1022
1023#
1024# UP and nommu archs use km based percpu allocator
1025#
1026config NEED_PER_CPU_KM
1027	depends on !SMP || !MMU
1028	bool
1029	default y
1030
1031config NEED_PER_CPU_EMBED_FIRST_CHUNK
1032	bool
1033
1034config NEED_PER_CPU_PAGE_FIRST_CHUNK
1035	bool
1036
1037config USE_PERCPU_NUMA_NODE_ID
1038	bool
1039
1040config HAVE_SETUP_PER_CPU_AREA
1041	bool
1042
1043config CMA
1044	bool "Contiguous Memory Allocator"
1045	depends on MMU
1046	select MIGRATION
1047	select MEMORY_ISOLATION
1048	help
1049	  This enables the Contiguous Memory Allocator which allows other
1050	  subsystems to allocate big physically-contiguous blocks of memory.
1051	  CMA reserves a region of memory and allows only movable pages to
1052	  be allocated from it. This way, the kernel can use the memory for
1053	  pagecache, and when a subsystem requests a contiguous area, the
1054	  allocated pages are migrated away to serve the contiguous request.
1055
1056	  If unsure, say "n".
1057
1058config CMA_DEBUGFS
1059	bool "CMA debugfs interface"
1060	depends on CMA && DEBUG_FS
1061	help
1062	  Turns on the DebugFS interface for CMA.
1063
1064config CMA_SYSFS
1065	bool "CMA information through sysfs interface"
1066	depends on CMA && SYSFS
1067	help
1068	  This option exposes some sysfs attributes to get information
1069	  from CMA.
1070
1071config CMA_AREAS
1072	int "Maximum count of the CMA areas"
1073	depends on CMA
1074	default 20 if NUMA
1075	default 8
1076	help
1077	  CMA allows creating CMA areas for a particular purpose, mainly
1078	  used as device private areas. This parameter sets the maximum
1079	  number of CMA areas in the system.
1080
1081	  If unsure, leave the default value "8" in UMA and "20" in NUMA.
1082
1083#
1084# Select this config option from the architecture Kconfig, if available, to set
1085# the max page order for physically contiguous allocations.
1086#
1087config ARCH_FORCE_MAX_ORDER
1088	int
1089
1090#
1091# When ARCH_FORCE_MAX_ORDER is not defined,
1092# the default page block order is MAX_PAGE_ORDER (10) as per
1093# include/linux/mmzone.h.
1094#
1095config PAGE_BLOCK_MAX_ORDER
1096	int "Page Block Order Upper Limit"
1097	range 1 10 if ARCH_FORCE_MAX_ORDER = 0
1098	default 10 if ARCH_FORCE_MAX_ORDER = 0
1099	range 1 ARCH_FORCE_MAX_ORDER if ARCH_FORCE_MAX_ORDER != 0
1100	default ARCH_FORCE_MAX_ORDER if ARCH_FORCE_MAX_ORDER != 0
1101	help
1102	  The page block order refers to the power of two number of pages that
1103	  are physically contiguous and can have a migrate type associated with
1104	  them. The maximum page block order is limited by
1105	  ARCH_FORCE_MAX_ORDER/MAX_PAGE_ORDER.
1106
1107	  This config sets an upper limit on the default page block
1108	  order when the page block order needs to be smaller than
1109	  ARCH_FORCE_MAX_ORDER/MAX_PAGE_ORDER or other limits
1110	  (see include/linux/pageblock-flags.h for details).
1111
1112	  Reducing pageblock order can negatively impact THP generation
1113	  success rate. If your workloads use THP heavily, please use this
1114	  option with caution.
1115
1116	  Don't change if unsure.
1117
1118config MEM_SOFT_DIRTY
1119	bool "Track memory changes"
1120	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
1121	select PROC_PAGE_MONITOR
1122	help
1123	  This option enables memory change tracking by introducing a
1124	  soft-dirty bit on PTEs. This bit is set when someone writes
1125	  into a page, just like the regular dirty bit, but unlike the latter
1126	  it can be cleared by hand.
1127
1128	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.
1129
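# Example of the "cleared by hand" interface mentioned above: soft-dirty bits
# are cleared with "echo 4 > /proc/<pid>/clear_refs" and read back from
# /proc/<pid>/pagemap (bit 55); see the referenced soft-dirty.rst.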
1130config GENERIC_EARLY_IOREMAP
1131	bool
1132
1133config STACK_MAX_DEFAULT_SIZE_MB
1134	int "Default maximum user stack size for 32-bit processes (MB)"
1135	default 100
1136	range 8 2048
1137	depends on STACK_GROWSUP && (!64BIT || COMPAT)
1138	help
1139	  This is the maximum stack size in megabytes in the VM layout of 32-bit
1140	  user processes when the stack grows upwards (currently only on the
1141	  parisc architecture) and the RLIMIT_STACK hard limit is unlimited.
1142
1143	  A sane initial value is 100 MB.
1144
1145config DEFERRED_STRUCT_PAGE_INIT
1146	bool "Defer initialisation of struct pages to kthreads"
1147	depends on SPARSEMEM
1148	depends on !NEED_PER_CPU_KM
1149	depends on 64BIT
1150	depends on !KMSAN
1151	select PADATA
1152	help
1153	  Ordinarily all struct pages are initialised during early boot in a
1154	  single thread. On very large machines this can take a considerable
1155	  amount of time. If this option is set, large machines will bring up
1156	  a subset of memmap at boot and then initialise the rest in parallel.
1157	  This has a potential performance impact on tasks running early in the
1158	  lifetime of the system until these kthreads finish the
1159	  initialisation.
1160
1161config PAGE_IDLE_FLAG
1162	bool
1163	select PAGE_EXTENSION if !64BIT
1164	help
1165	  This adds PG_idle and PG_young flags to 'struct page'.  PTE Accessed
1166	  bit writers can set the state of the bit in the flags so that PTE
1167	  Accessed bit readers may avoid disturbance.
1168
1169config IDLE_PAGE_TRACKING
1170	bool "Enable idle page tracking"
1171	depends on SYSFS && MMU
1172	select PAGE_IDLE_FLAG
1173	help
1174	  This feature allows estimating the number of user pages that have
1175	  not been touched during a given period of time. This information can
1176	  be useful to tune memory cgroup limits and/or for job placement
1177	  within a compute cluster.
1178
1179	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
1180	  more details.
1181
1182# Architectures which implement cpu_dcache_is_aliasing() to query
1183# whether the data caches are aliased (VIVT or VIPT with dcache
1184# aliasing) need to select this.
1185config ARCH_HAS_CPU_CACHE_ALIASING
1186	bool
1187
1188config ARCH_HAS_CACHE_LINE_SIZE
1189	bool
1190
1191config ARCH_HAS_CURRENT_STACK_POINTER
1192	bool
1193	help
1194	  In support of HARDENED_USERCOPY performing stack variable lifetime
1195	  checking, an architecture-agnostic way to find the stack pointer
1196	  is needed. Once an architecture defines an unsigned long global
1197	  register alias named "current_stack_pointer", this config can be
1198	  selected.
1199
1200config ARCH_HAS_ZONE_DMA_SET
1201	bool
1202
1203config ZONE_DMA
1204	bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
1205	default y if ARM64 || X86
1206
1207config ZONE_DMA32
1208	bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
1209	depends on !X86_32
1210	default y if ARM64
1211
1212config ZONE_DEVICE
1213	bool "Device memory (pmem, HMM, etc...) hotplug support"
1214	depends on MEMORY_HOTPLUG
1215	depends on MEMORY_HOTREMOVE
1216	depends on SPARSEMEM_VMEMMAP
1217	select XARRAY_MULTI
1218
1219	help
1220	  Device memory hotplug support allows for establishing pmem,
1221	  or other device driver discovered memory regions, in the
1222	  memmap. This allows pfn_to_page() lookups of otherwise
1223	  "device-physical" addresses which is needed for using a DAX
1224	  mapping in an O_DIRECT operation, among other things.
1225
1226	  If FS_DAX is enabled, then say Y.
1227
1228#
1229# Helpers to mirror range of the CPU page tables of a process into device page
1230# tables.
1231#
1232config HMM_MIRROR
1233	bool
1234	depends on MMU
1235
1236config GET_FREE_REGION
1237	bool
1238
1239config DEVICE_PRIVATE
1240	bool "Unaddressable device memory (GPU memory, ...)"
1241	depends on ZONE_DEVICE
1242	select GET_FREE_REGION
1243
1244	help
1245	  Allows creation of struct pages to represent unaddressable device
1246	  memory; i.e., memory that is only accessible from the device (or
1247	  group of devices). You likely also want to select HMM_MIRROR.
1248
1249config VMAP_PFN
1250	bool
1251
1252config ARCH_USES_HIGH_VMA_FLAGS
1253	bool
1254config ARCH_HAS_PKEYS
1255	bool
1256
1257config ARCH_USES_PG_ARCH_2
1258	bool
1259config ARCH_USES_PG_ARCH_3
1260	bool
1261
1262config VM_EVENT_COUNTERS
1263	default y
1264	bool "Enable VM event counters for /proc/vmstat" if EXPERT
1265	help
1266	  VM event counters are needed for event counts to be shown.
1267	  This option allows the disabling of the VM event counters
1268	  on EXPERT systems.  /proc/vmstat will only show page counts
1269	  if VM event counters are disabled.
1270
1271config PERCPU_STATS
1272	bool "Collect percpu memory statistics"
1273	help
1274	  This feature collects and exposes statistics via debugfs. The
1275	  information includes global and per chunk statistics, which can
1276	  be used to help understand percpu memory usage.
1277
1278config GUP_TEST
1279	bool "Enable infrastructure for get_user_pages()-related unit tests"
1280	depends on DEBUG_FS
1281	help
1282	  Provides /sys/kernel/debug/gup_test, which in turn provides a way
1283	  to make ioctl calls that can launch kernel-based unit tests for
1284	  the get_user_pages*() and pin_user_pages*() family of API calls.
1285
1286	  These tests include benchmark testing of the _fast variants of
1287	  get_user_pages*() and pin_user_pages*(), as well as smoke tests of
1288	  the non-_fast variants.
1289
1290	  There is also a sub-test that allows running dump_page() on any
1291	  of up to eight pages (selected by command line args) within the
1292	  range of user-space addresses. These pages are either pinned via
1293	  pin_user_pages*(), or pinned via get_user_pages*(), as specified
1294	  by other command line arguments.
1295
1296	  See tools/testing/selftests/mm/gup_test.c
1297
1298comment "GUP_TEST needs to have DEBUG_FS enabled"
1299	depends on !GUP_TEST && !DEBUG_FS
1300
1301config GUP_GET_PXX_LOW_HIGH
1302	bool
1303
1304config DMAPOOL_TEST
1305	tristate "Enable a module to run time tests on dma_pool"
1306	depends on HAS_DMA
1307	help
1308	  Provides a test module that will allocate and free many blocks of
1309	  various sizes and report how long it takes. This is intended to
1310	  provide a consistent way to measure how changes to the
1311	  dma_pool_alloc/free routines affect performance.
1312
1313config ARCH_HAS_PTE_SPECIAL
1314	bool
1315
1316config MAPPING_DIRTY_HELPERS
1317        bool
1318
1319config KMAP_LOCAL
1320	bool
1321
1322config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
1323	bool
1324
1325config MEMFD_CREATE
1326	bool "Enable memfd_create() system call" if EXPERT
1327
1328config SECRETMEM
1329	default y
1330	bool "Enable memfd_secret() system call" if EXPERT
1331	depends on ARCH_HAS_SET_DIRECT_MAP
1332	help
1333	  Enable the memfd_secret() system call with the ability to create
1334	  memory areas visible only in the context of the owning process and
1335	  not mapped to other processes and other kernel page tables.
1336
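# Sketch of typical memfd_secret() usage (userspace, assuming a libc that
# exposes SYS_memfd_secret):
#   int fd = syscall(SYS_memfd_secret, 0);
#   ftruncate(fd, len);
#   void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
# giving a mapping visible only to the owning process, as described above.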
1337config ANON_VMA_NAME
1338	bool "Anonymous VMA name support"
1339	depends on PROC_FS && ADVISE_SYSCALLS && MMU
1340
1341	help
1342	  Allow naming anonymous virtual memory areas.
1343
1344	  This feature allows assigning names to virtual memory areas. Assigned
1345	  names can be later retrieved from /proc/pid/maps and /proc/pid/smaps
1346	  and help identify individual anonymous memory areas.
1347	  Assigning a name to an anonymous virtual memory area might prevent that
1348	  area from being merged with adjacent virtual memory areas due to the
1349	  difference in their names.
1350
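# Sketch of the naming interface (userspace): a name is assigned with
#   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, length, "my-buffer");
# and then shows up in /proc/<pid>/maps as "[anon:my-buffer]".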
1351config HAVE_ARCH_USERFAULTFD_WP
1352	bool
1353	help
1354	  Arch has userfaultfd write protection support
1355
1356config HAVE_ARCH_USERFAULTFD_MINOR
1357	bool
1358	help
1359	  Arch has userfaultfd minor fault support
1360
1361menuconfig USERFAULTFD
1362	bool "Enable userfaultfd() system call"
1363	depends on MMU
1364	help
1365	  Enable the userfaultfd() system call that allows userland to intercept
1366	  and handle page faults.
1367
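# Sketch of the userland side described above:
#   int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
# followed by UFFDIO_API and UFFDIO_REGISTER ioctls on the returned descriptor.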
1368if USERFAULTFD
1369config PTE_MARKER_UFFD_WP
1370	bool "Userfaultfd write protection support for shmem/hugetlbfs"
1371	default y
1372	depends on HAVE_ARCH_USERFAULTFD_WP
1373
1374	help
1375	  Allows creating marker PTEs for userfaultfd write protection
1376	  purposes.  It is required to enable userfaultfd write protection on
1377	  file-backed memory types like shmem and hugetlbfs.
1378endif # USERFAULTFD
1379
1380# multi-gen LRU {
1381config LRU_GEN
1382	bool "Multi-Gen LRU"
1383	depends on MMU
1384	# make sure folio->flags has enough spare bits
1385	depends on 64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP
1386	help
1387	  A high performance LRU implementation to overcommit memory. See
1388	  Documentation/admin-guide/mm/multigen_lru.rst for details.
1389
1390config LRU_GEN_ENABLED
1391	bool "Enable by default"
1392	depends on LRU_GEN
1393	help
1394	  This option enables the multi-gen LRU by default.
1395
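# Example: regardless of LRU_GEN_ENABLED, the multi-gen LRU can be switched at
# runtime, e.g. "echo y > /sys/kernel/mm/lru_gen/enabled", as described in the
# referenced multigen_lru.rst.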
1396config LRU_GEN_STATS
1397	bool "Full stats for debugging"
1398	depends on LRU_GEN
1399	help
1400	  Do not enable this option unless you plan to look at historical stats
1401	  from evicted generations for debugging purposes.
1402
1403	  This option has a per-memcg and per-node memory overhead.
1404
1405config LRU_GEN_WALKS_MMU
1406	def_bool y
1407	depends on LRU_GEN && ARCH_HAS_HW_PTE_YOUNG
1408# }
1409
1410config ARCH_SUPPORTS_PER_VMA_LOCK
1411       def_bool n
1412
1413config PER_VMA_LOCK
1414	def_bool y
1415	depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
1416	help
1417	  Allow per-vma locking during page fault handling.
1418
1419	  This feature allows locking each virtual memory area separately when
1420	  handling page faults instead of taking mmap_lock.
1421
1422config LOCK_MM_AND_FIND_VMA
1423	bool
1424	depends on !STACK_GROWSUP
1425
1426config IOMMU_MM_DATA
1427	bool
1428
1429config EXECMEM
1430	bool
1431
1432config NUMA_MEMBLKS
1433	bool
1434
1435config NUMA_EMU
1436	bool "NUMA emulation"
1437	depends on NUMA_MEMBLKS
1438	depends on X86 || GENERIC_ARCH_NUMA
1439	help
1440	  Enable NUMA emulation. A flat machine will be split
1441	  into virtual nodes when booted with "numa=fake=N", where N is the
1442	  number of nodes. This is only useful for debugging.
1443
1444config ARCH_HAS_USER_SHADOW_STACK
1445	bool
1446	help
1447	  The architecture has hardware support for userspace shadow call
1448	  stacks (e.g., x86 CET, arm64 GCS or RISC-V Zicfiss).
1449
1450config ARCH_SUPPORTS_PT_RECLAIM
1451	def_bool n
1452
1453config PT_RECLAIM
1454	bool "reclaim empty user page table pages"
1455	default y
1456	depends on ARCH_SUPPORTS_PT_RECLAIM && MMU && SMP
1457	select MMU_GATHER_RCU_TABLE_FREE
1458	help
1459	  Try to reclaim empty user page table pages in paths other than the
1460	  munmap and exit_mmap paths.
1461
1462	  Note: currently only empty user PTE page table pages are reclaimed.
1463
1464config FIND_NORMAL_PAGE
1465	def_bool n
1466
1467source "mm/damon/Kconfig"
1468
1469endmenu
1470