# SPDX-License-Identifier: GPL-2.0-only

menu "Memory Management options"

config SELECT_MEMORY_MODEL
	def_bool y
	depends on ARCH_SELECT_MEMORY_MODEL

choice
	prompt "Memory model"
	depends on SELECT_MEMORY_MODEL
	default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT
	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
	default FLATMEM_MANUAL
	help
	  This option allows you to change some of the ways that
	  Linux manages its memory internally. Most users will
	  only have one option here selected by the architecture
	  configuration. This is normal.

config FLATMEM_MANUAL
	bool "Flat Memory"
	depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
	help
	  This option is best suited for non-NUMA systems with
	  flat address space. FLATMEM is the most efficient
	  system in terms of performance and resource consumption
	  and it is the best option for smaller systems.

	  For systems that have holes in their physical address
	  spaces and for features like NUMA and memory hotplug,
	  choose "Sparse Memory".

	  If unsure, choose this option (Flat Memory) over any other.

config DISCONTIGMEM_MANUAL
	bool "Discontiguous Memory"
	depends on ARCH_DISCONTIGMEM_ENABLE
	help
	  This option provides enhanced support for discontiguous
	  memory systems, over FLATMEM. These systems have holes
	  in their physical address spaces, and this option provides
	  more efficient handling of these holes.

	  Although "Discontiguous Memory" is still used by several
	  architectures, it is considered deprecated in favor of
	  "Sparse Memory".

	  If unsure, choose "Sparse Memory" over this option.

config SPARSEMEM_MANUAL
	bool "Sparse Memory"
	depends on ARCH_SPARSEMEM_ENABLE
	help
	  This will be the only option for some systems, including
	  memory hot-plug systems. This is normal.

	  This option provides efficient support for systems with
	  holes in their physical address space and allows memory
	  hot-plug and hot-remove.

	  If unsure, choose "Flat Memory" over this option.

endchoice

config DISCONTIGMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL

config SPARSEMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
	def_bool y
	depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL

config FLAT_NODE_MEM_MAP
	def_bool y
	depends on !SPARSEMEM

#
# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
# to represent different areas of memory. This variable allows
# those dependencies to exist individually.
#
config NEED_MULTIPLE_NODES
	def_bool y
	depends on DISCONTIGMEM || NUMA

config HAVE_MEMORY_PRESENT
	def_bool y
	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when memory_present() is called. If this cannot
# be done on your architecture, select this option. However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
	bool

#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
	def_bool y
	depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
	bool

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
	default y
	help
	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
	  pfn_to_page and page_to_pfn operations. This is the most
	  efficient option when sufficient kernel resources are available.

config HAVE_MEMBLOCK_NODE_MAP
	bool

config HAVE_MEMBLOCK_PHYS_MAP
	bool

config HAVE_GENERIC_GUP
	bool

config ARCH_KEEP_MEMBLOCK
	bool

config MEMORY_ISOLATION
	bool

#
# Only set this on architectures that have completely implemented the memory
# hotplug feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
	def_bool n

# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
	bool "Allow for memory hot-add"
	depends on SPARSEMEM || X86_64_ACPI_NUMA
	depends on ARCH_ENABLE_MEMORY_HOTPLUG

config MEMORY_HOTPLUG_SPARSE
	def_bool y
	depends on SPARSEMEM && MEMORY_HOTPLUG

config MEMORY_HOTPLUG_DEFAULT_ONLINE
	bool "Online the newly added memory blocks by default"
	depends on MEMORY_HOTPLUG
	help
	  This option sets the default memory hotplug onlining policy
	  (/sys/devices/system/memory/auto_online_blocks), which determines
	  what happens to newly added memory regions. The policy can always
	  be changed at runtime.
	  See Documentation/memory-hotplug.txt for more information.

	  Say Y here if you want all hot-plugged memory blocks to appear in
	  'online' state by default.
	  Say N here if you want the default policy to keep all hot-plugged
	  memory blocks in 'offline' state.
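
#
# The onlining policy above is exposed through sysfs. As a rough userspace
# sketch only (error handling trimmed; the block name "memory32" is a
# placeholder, real tools enumerate /sys/devices/system/memory/), a helper
# could read the current policy and online one memory block like this:
#
#	#include <fcntl.h>
#	#include <stdio.h>
#	#include <unistd.h>
#
#	int main(void)
#	{
#		char policy[32] = "";
#		int fd = open("/sys/devices/system/memory/auto_online_blocks", O_RDONLY);
#		if (fd >= 0) {
#			read(fd, policy, sizeof(policy) - 1);
#			close(fd);
#		}
#		printf("auto_online_blocks: %s", policy);
#
#		/* Online a specific (hypothetical) memory block by hand. */
#		fd = open("/sys/devices/system/memory/memory32/state", O_WRONLY);
#		if (fd >= 0) {
#			write(fd, "online", 6);
#			close(fd);
#		}
#		return 0;
#	}
#
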
config MEMORY_HOTREMOVE
	bool "Allow for memory hot remove"
	select MEMORY_ISOLATION
	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
	depends on MIGRATION

# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
	int
	default "999999" if !MMU
	default "999999" if ARM && !CPU_CACHE_VIPT
	default "999999" if PARISC && !PA20
	default "4"

config ARCH_ENABLE_SPLIT_PMD_PTLOCK
	bool

#
# support for memory balloon
config MEMORY_BALLOON
	bool

#
# support for memory balloon compaction
config BALLOON_COMPACTION
	bool "Allow for balloon memory compaction/migration"
	def_bool y
	depends on COMPACTION && MEMORY_BALLOON
	help
	  Memory fragmentation introduced by ballooning might significantly
	  reduce the number of 2MB contiguous memory blocks that can be
	  used within a guest, thus imposing performance penalties associated
	  with the reduced number of transparent huge pages that could be used
	  by the guest workload. Allowing compaction and migration of memory
	  pages enlisted as part of memory balloon devices avoids this
	  scenario and helps improve memory defragmentation.

#
# support for memory compaction
config COMPACTION
	bool "Allow for memory compaction"
	def_bool y
	select MIGRATION
	depends on MMU
	help
	  Compaction is the only memory management component able to form
	  high order (larger physically contiguous) memory blocks
	  reliably. The page allocator relies on compaction heavily and
	  the lack of the feature can lead to unexpected OOM killer
	  invocations for high order memory requests. You shouldn't
	  disable this option unless there really is a strong reason for
	  it, and then we would be really interested to hear about that at
	  linux-mm@kvack.org.

#
# support for page migration
#
config MIGRATION
	bool "Page migration"
	def_bool y
	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
	help
	  Allows the migration of the physical location of pages of processes
	  while the virtual addresses are not changed. This is useful in
	  two situations. The first is on NUMA systems to put pages nearer
	  to the processors accessing them. The second is when allocating huge
	  pages as migration can relocate pages to satisfy a huge page
	  allocation instead of reclaiming.

config ARCH_ENABLE_HUGEPAGE_MIGRATION
	bool

config ARCH_ENABLE_THP_MIGRATION
	bool

config CONTIG_ALLOC
	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA

config PHYS_ADDR_T_64BIT
	def_bool 64BIT

config BOUNCE
	bool "Enable bounce buffers"
	default y
	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
	help
	  Enable bounce buffers for devices that cannot access
	  the full range of memory available to the CPU. Enabled
	  by default when ZONE_DMA or HIGHMEM is selected, but you
	  may say n to override this.

config NR_QUICK
	int
	depends on QUICKLIST
	default "1"

config VIRT_TO_BUS
	bool
	help
	  An architecture should select this if it implements the
	  deprecated interface virt_to_bus(). All new architectures
	  should probably not select this.

config MMU_NOTIFIER
	bool
	select SRCU

config KSM
	bool "Enable KSM for page merging"
	depends on MMU
	select XXHASH
	help
	  Enable Kernel Samepage Merging: KSM periodically scans those areas
	  of an application's address space that an app has advised may be
	  mergeable. When it finds pages of identical content, it replaces
	  the many instances with a single page of that content, so
	  saving memory until one or another app needs to modify the content.
	  Recommended for use with KVM, or with other duplicative applications.
	  See Documentation/vm/ksm.rst for more information: KSM is inactive
	  until a program has madvised that an area is MADV_MERGEABLE, and
	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
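
#
# As a minimal userspace sketch (not taken from the kernel sources), an
# application opts a region into KSM scanning with madvise(MADV_MERGEABLE);
# the mapping size below is only illustrative:
#
#	#include <sys/mman.h>
#
#	int main(void)
#	{
#		size_t len = 64 * 4096;
#		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
#				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#		if (buf == MAP_FAILED)
#			return 1;
#		/* Hint that identical pages in this range may be merged;
#		 * scanning only happens once /sys/kernel/mm/ksm/run is 1. */
#		if (madvise(buf, len, MADV_MERGEABLE))
#			return 1;
#		return 0;
#	}
#
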
config DEFAULT_MMAP_MIN_ADDR
	int "Low address space to protect from user allocation"
	depends on MMU
	default 4096
	help
	  This is the portion of low virtual memory which should be protected
	  from userspace allocation. Keeping a user from writing to low pages
	  can help reduce the impact of kernel NULL pointer bugs.

	  For most ia64, ppc64 and x86 users with lots of address space
	  a value of 65536 is reasonable and should cause no problems.
	  On arm and other archs it should not be higher than 32768.
	  Programs which use vm86 functionality or have some need to map
	  this low address space will need CAP_SYS_RAWIO, or this
	  protection can be disabled by setting the value to 0.

	  This value can be changed after boot using the
	  /proc/sys/vm/mmap_min_addr tunable.

config ARCH_SUPPORTS_MEMORY_FAILURE
	bool

config MEMORY_FAILURE
	depends on MMU
	depends on ARCH_SUPPORTS_MEMORY_FAILURE
	bool "Enable recovery from hardware memory errors"
	select MEMORY_ISOLATION
	select RAS
	help
	  Enables code to recover from some memory failures on systems
	  with MCA recovery. This allows a system to continue running
	  even when some of its memory has uncorrected errors. This requires
	  special hardware support and typically ECC memory.

config HWPOISON_INJECT
	tristate "HWPoison pages injector"
	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
	select PROC_PAGE_MONITOR

config NOMMU_INITIAL_TRIM_EXCESS
	int "Turn on mmap() excess space trimming before booting"
	depends on !MMU
	default 1
	help
	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
	  of memory on which to store mappings, but it can only ask the system
	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
	  more than it requires. To deal with this, mmap() is able to trim off
	  the excess and return it to the allocator.

	  If trimming is enabled, the excess is trimmed off and returned to the
	  system allocator, which can cause extra fragmentation, particularly
	  if there are a lot of transient processes.

	  If trimming is disabled, the excess is kept, but not used, which for
	  long-term mappings means that the space is wasted.

	  Trimming can be dynamically controlled through a sysctl option
	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
	  excess pages there must be before trimming should occur, or zero if
	  no trimming is to occur.

	  This option specifies the initial value of that sysctl. The default
	  of 1 says that all excess pages should be trimmed.

	  See Documentation/nommu-mmap.txt for more information.

config TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select COMPACTION
	select XARRAY_MULTI
	help
	  Transparent Hugepages allows the kernel to use huge pages and
	  huge tlb transparently to the applications whenever possible.
	  This feature can improve computing performance for certain
	  applications by speeding up page faults during memory
	  allocation, by reducing the number of tlb misses and by speeding
	  up the pagetable walking.

	  If your system is memory constrained (for example, embedded),
	  you may want to say N.
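
#
# As an illustrative userspace sketch (not part of the kernel itself), an
# application can request huge pages for a specific mapping with
# madvise(MADV_HUGEPAGE); this only has an effect when the sysfs policy
# chosen below is "always" or "madvise". The 2 MB size is an assumption
# matching x86-64 PMD-sized huge pages:
#
#	#include <sys/mman.h>
#
#	int main(void)
#	{
#		size_t len = 2UL << 20;	/* one PMD-sized huge page on x86-64 */
#		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
#				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#		if (buf == MAP_FAILED)
#			return 1;
#		/* Ask the fault path/khugepaged to back this range with THP. */
#		if (madvise(buf, len, MADV_HUGEPAGE))
#			return 1;
#		return 0;
#	}
#
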
choice
	prompt "Transparent Hugepage Support sysfs defaults"
	depends on TRANSPARENT_HUGEPAGE
	default TRANSPARENT_HUGEPAGE_ALWAYS
	help
	  Selects the sysfs defaults for Transparent Hugepage Support.

	config TRANSPARENT_HUGEPAGE_ALWAYS
		bool "always"
		help
		  Enabling Transparent Hugepage always can increase the
		  memory footprint of applications without a guaranteed
		  benefit, but it will work automatically for all applications.

	config TRANSPARENT_HUGEPAGE_MADVISE
		bool "madvise"
		help
		  Enabling Transparent Hugepage madvise will only provide a
		  performance improvement benefit to the applications using
		  madvise(MADV_HUGEPAGE), but it won't risk increasing the
		  memory footprint of applications without a guaranteed
		  benefit.
endchoice

config ARCH_WANTS_THP_SWAP
	def_bool n

config THP_SWAP
	def_bool y
	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
	help
	  Swap transparent huge pages in one piece, without splitting.
	  XXX: For now, the swap cluster backing a transparent huge page
	  will be split after swapout.

	  For selection by architectures with reasonable THP sizes.

config TRANSPARENT_HUGE_PAGECACHE
	def_bool y
	depends on TRANSPARENT_HUGEPAGE

#
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
	depends on !SMP
	bool
	default y

config CLEANCACHE
	bool "Enable cleancache driver to cache clean pages if tmem is present"
	help
	  Cleancache can be thought of as a page-granularity victim cache
	  for clean pages that the kernel's pageframe replacement algorithm
	  (PFRA) would like to keep around, but can't since there isn't enough
	  memory. So when the PFRA "evicts" a page, it first attempts to use
	  cleancache code to put the data contained in that page into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size. And when a cleancache-enabled
	  filesystem wishes to access a page in a file on disk, it first
	  checks cleancache to see if it already contains it; if it does,
	  the page is copied into the kernel and a disk access is avoided.
	  When a transcendent memory driver is available (such as zcache or
	  Xen transcendent memory), a significant I/O reduction
	  may be achieved. When none is available, all cleancache calls
	  are reduced to a single pointer-compare-against-NULL resulting
	  in a negligible performance hit.

	  If unsure, say Y to enable cleancache.

config FRONTSWAP
	bool "Enable frontswap to cache swap pages if tmem is present"
	depends on SWAP
	help
	  Frontswap is so named because it can be thought of as the opposite
	  of a "backing" store for a swap device. The data is stored into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size. When space in transcendent memory is available,
	  a significant swap I/O reduction may be achieved. When none is
	  available, all frontswap calls are reduced to a single pointer-
	  compare-against-NULL resulting in a negligible performance hit
	  and swap data is stored as normal on the matching swap device.

	  If unsure, say Y to enable frontswap.

config CMA
	bool "Contiguous Memory Allocator"
	depends on MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it. This way, the kernel can use the memory for
	  pagecache, and when a subsystem requests a contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".

config CMA_DEBUG
	bool "CMA debug messages (DEVELOPMENT)"
	depends on DEBUG_KERNEL && CMA
	help
	  Turns on debug messages in CMA. This produces KERN_DEBUG
	  messages for every CMA call as well as various messages while
	  processing calls such as dma_alloc_from_contiguous().
	  This option does not affect warning and error messages.

config CMA_DEBUGFS
	bool "CMA debugfs interface"
	depends on CMA && DEBUG_FS
	help
	  Turns on the DebugFS interface for CMA.

config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 7
	help
	  CMA allows creating CMA areas for a particular purpose, mainly
	  used as device private areas. This parameter sets the maximum
	  number of CMA areas in the system.

	  If unsure, leave the default value "7".

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory changes tracking by introducing a
	  soft-dirty bit on pte-s. This bit is set when someone writes
	  into a page, just like the regular dirty bit, but unlike the
	  latter it can be cleared by hand.

	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.

config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on FRONTSWAP && CRYPTO=y
	select CRYPTO_LZO
	select ZPOOL
	help
	  A lightweight compressed cache for swap pages. It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on the swap device
	  and, in the case where decompressing from RAM is faster than swap
	  device reads, can also improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim. While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.

config ZPOOL
	tristate "Common API for compressed memory storage"
	help
	  Compressed memory storage API. This allows using either zbud or
	  zsmalloc.

config ZBUD
	tristate "Low (Up to 2x) density storage for compressed pages"
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page. While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config Z3FOLD
	tristate "Up to 3x density storage for compressed pages"
	depends on ZPOOL
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to three compressed pages per physical
	  page. It is a ZBUD derivative so the simplicity and determinism are
	  still there.

config ZSMALLOC
	tristate "Memory allocator for compressed pages"
	depends on MMU
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  compressed RAM pages. zsmalloc uses virtual memory mapping
	  in order to reduce fragmentation. However, this results in a
	  non-standard allocator interface where a handle, not a pointer, is
	  returned by an alloc(). This handle must be mapped in order to
	  access the allocated space.

config PGTABLE_MAPPING
	bool "Use page table mapping to access object in zsmalloc"
	depends on ZSMALLOC
	help
	  By default, zsmalloc uses a copy-based object mapping method to
	  access allocations that span two pages. However, if a particular
	  architecture (e.g., ARM) performs VM mapping faster than copying,
	  then you should select this. This causes zsmalloc to use page table
	  mapping rather than copying for object mapping.

	  You can check speed with the zsmalloc benchmark:
	  https://github.com/spartacus06/zsmapbench

config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
	select DEBUG_FS
	help
	  This option enables code in zsmalloc to collect various
	  statistics about what's happening in zsmalloc and exports that
	  information to userspace via debugfs.
	  If unsure, say N.

config GENERIC_EARLY_IOREMAP
	bool

config MAX_STACK_SIZE_MB
	int "Maximum user stack size for 32-bit processes (MB)"
	default 80
	range 8 2048
	depends on STACK_GROWSUP && (!64BIT || COMPAT)
	help
	  This is the maximum stack size in megabytes in the VM layout of 32-bit
	  user processes when the stack grows upwards (currently only on parisc
	  arch). The stack will be located at the highest memory address minus
	  the given value, unless the RLIMIT_STACK hard limit is changed to a
	  smaller value, in which case that is used.

	  A sane initial value is 80 MB.

config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	depends on SPARSEMEM
	depends on !NEED_PER_CPU_KM
	depends on 64BIT
	help
	  Ordinarily all struct pages are initialised during early boot in a
	  single thread. On very large machines this can take a considerable
	  amount of time. If this option is set, large machines will bring up
	  a subset of memmap at boot and then initialise the rest in parallel
	  by starting a one-off "pgdatinitX" kernel thread for each node X.
	  This has a potential performance impact on processes running early
	  in the lifetime of the system until these kthreads finish the
	  initialisation.

config IDLE_PAGE_TRACKING
	bool "Enable idle page tracking"
	depends on SYSFS && MMU
	select PAGE_EXTENSION if !64BIT
	help
	  This feature allows estimating the number of user pages that have
	  not been touched during a given period of time. This information can
	  be useful to tune memory cgroup limits and/or for job placement
	  within a compute cluster.

	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
	  more details.
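
#
# A rough userspace sketch of the interface described above (not from the
# kernel tree): each bit in /sys/kernel/mm/page_idle/bitmap covers one page
# frame and the file is accessed in 64-bit chunks. Writing a set bit marks
# the frame idle; reading it back later shows whether the frame was accessed
# in between. The PFN value below is purely illustrative; real tools derive
# PFNs from /proc/<pid>/pagemap.
#
#	#include <fcntl.h>
#	#include <stdint.h>
#	#include <unistd.h>
#
#	int main(void)
#	{
#		uint64_t pfn = 0x10000;	/* placeholder page frame number */
#		uint64_t word = 1ULL << (pfn % 64);
#		off_t off = (pfn / 64) * 8;
#		int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
#		if (fd < 0)
#			return 1;
#		pwrite(fd, &word, sizeof(word), off);	/* mark the frame idle */
#		/* ... let the workload run for a while ... */
#		pread(fd, &word, sizeof(word), off);	/* bit still set => idle */
#		close(fd);
#		return 0;
#	}
#
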
# arch_add_memory() comprehends device memory
config ARCH_HAS_ZONE_DEVICE
	bool

config ZONE_DEVICE
	bool "Device memory (pmem, HMM, etc...) hotplug support"
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_ZONE_DEVICE
	select XARRAY_MULTI
	help
	  Device memory hotplug support allows for establishing pmem,
	  or other device driver discovered memory regions, in the
	  memmap. This allows pfn_to_page() lookups of otherwise
	  "device-physical" addresses, which is needed for using a DAX
	  mapping in an O_DIRECT operation, among other things.

	  If FS_DAX is enabled, then say Y.

config ARCH_HAS_HMM_MIRROR
	bool
	default y
	depends on (X86_64 || PPC64)
	depends on MMU && 64BIT

config ARCH_HAS_HMM_DEVICE
	bool
	default y
	depends on (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_ZONE_DEVICE
	select XARRAY_MULTI

config ARCH_HAS_HMM
	bool
	default y
	depends on (X86_64 || PPC64)
	depends on ZONE_DEVICE
	depends on MMU && 64BIT
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP

config MIGRATE_VMA_HELPER
	bool

config DEV_PAGEMAP_OPS
	bool

config HMM
	bool
	select MMU_NOTIFIER
	select MIGRATE_VMA_HELPER

config HMM_MIRROR
	bool "HMM mirror CPU page table into a device page table"
	depends on ARCH_HAS_HMM
	select HMM
	help
	  Select HMM_MIRROR if you want to mirror a range of the CPU page table
	  of a process into a device page table. Here, mirror means "keep
	  synchronized". Prerequisites: the device must provide the ability to
	  write-protect its page tables (at PAGE_SIZE granularity), and must be
	  able to recover from the resulting potential page faults.

config DEVICE_PRIVATE
	bool "Unaddressable device memory (GPU memory, ...)"
	depends on ARCH_HAS_HMM
	select HMM
	select DEV_PAGEMAP_OPS
	help
	  Allows creation of struct pages to represent unaddressable device
	  memory; i.e., memory that is only accessible from the device (or
	  group of devices). You likely also want to select HMM_MIRROR.

config DEVICE_PUBLIC
	bool "Addressable device memory (like GPU memory)"
	depends on ARCH_HAS_HMM
	select HMM
	select DEV_PAGEMAP_OPS
	help
	  Allows creation of struct pages to represent addressable device
	  memory; i.e., memory that is accessible from both the device and
	  the CPU.

config FRAME_VECTOR
	bool

config ARCH_USES_HIGH_VMA_FLAGS
	bool

config ARCH_HAS_PKEYS
	bool

config PERCPU_STATS
	bool "Collect percpu memory statistics"
	help
	  This feature collects and exposes statistics via debugfs. The
	  information includes global and per chunk statistics, which can
	  be used to help understand percpu memory usage.

config GUP_BENCHMARK
	bool "Enable infrastructure for get_user_pages_fast() benchmarking"
	help
	  Provides /sys/kernel/debug/gup_benchmark that helps with testing
	  performance of get_user_pages_fast().

	  See tools/testing/selftests/vm/gup_benchmark.c

config ARCH_HAS_PTE_SPECIAL
	bool

endmenu