/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern unsigned long ksm_zero_pages;

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		ksm_zero_pages--;
		mm->ksm_zero_pages--;
	}
}
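/*
 * Illustrative sketch, not part of the upstream header: a caller that
 * clears a PTE is expected to hand the old PTE value to
 * ksm_might_unmap_zero_page() so the KSM zero-page counters stay
 * balanced, roughly
 *
 *	pte_t old_pte = ptep_get_and_clear(mm, addr, ptep);
 *	ksm_might_unmap_zero_page(mm, old_pte);
 *
 * where mm, addr and ptep are whatever the caller already holds; the
 * exact call site, PTE accessor and locking depend on that caller.
 */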
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	int ret;

	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
		ret = __ksm_enter(mm);
		if (ret)
			return ret;
	}

	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
		set_bit(MMF_VM_MERGE_ANY, &mm->flags);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}
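/*
 * Illustrative sketch, not part of the upstream header: ksm_fork() and
 * ksm_exit() are meant to be hooked into the mm lifecycle, conceptually
 *
 *	err = ksm_fork(mm, oldmm);	(when duplicating an address space)
 *	ksm_exit(mm);			(when the address space is torn down)
 *
 * so that a child inherits MMF_VM_MERGEABLE/MMF_VM_MERGE_ANY from its
 * parent and per-mm KSM state is released on exit.
 */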
/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#ifdef CONFIG_MEMORY_FAILURE
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early);
#endif

#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *);
#endif /* CONFIG_PROC_FS */

#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
				     struct list_head *to_kill, int force_early)
{
}
#endif

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif	/* __LINUX_KSM_H */