/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
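
/*
 * Userspace opts memory into KSM either per-VMA with
 * madvise(MADV_MERGEABLE) or process-wide with
 * prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0); ksmd then scans the
 * registered areas and merges pages with identical content. A minimal,
 * illustrative userspace sketch (not part of this header):
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, len, MADV_MERGEABLE);
 */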

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern unsigned long ksm_zero_pages;

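/*
 * ksm_might_unmap_zero_page() should be called when tearing down a pte
 * that may map a KSM-placed zeropage, so the global and per-mm zeropage
 * counters stay balanced. A hedged sketch of the expected caller
 * pattern (illustrative, not a real kernel path):
 *
 *	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 *	ksm_might_unmap_zero_page(mm, pte);
 */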
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		ksm_zero_pages--;
		mm->ksm_zero_pages--;
	}
}

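/*
 * ksm_fork() propagates KSM state from parent to child at fork(): a
 * parent already registered with ksmd (MMF_VM_MERGEABLE) registers the
 * child too, and the process-wide MMF_VM_MERGE_ANY mode is inherited
 * as well.
 */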
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	int ret;

	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
		ret = __ksm_enter(mm);
		if (ret)
			return ret;
	}

	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
		set_bit(MMF_VM_MERGE_ANY, &mm->flags);

	return 0;
}

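/*
 * ksm_execve() is called for the fresh mm created by execve(): if the
 * inherited flags include MMF_VM_MERGE_ANY, the new mm is registered
 * with ksmd via __ksm_enter() so that merging keeps working across
 * exec.
 */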
static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);
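/*
 * A hedged sketch of the expected caller pattern on the swap-in path
 * (illustrative; error conventions assumed from the current
 * implementation: NULL on allocation failure, ERR_PTR(-EHWPOISON) for
 * a poisoned page):
 *
 *	folio = ksm_might_need_to_copy(folio, vma, addr);
 *	if (unlikely(!folio))
 *		return VM_FAULT_OOM;
 *	if (unlikely(folio == ERR_PTR(-EHWPOISON)))
 *		return VM_FAULT_HWPOISON;
 */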

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#ifdef CONFIG_MEMORY_FAILURE
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early);
#endif

#ifdef CONFIG_PROC_FS
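/*
 * Reports this mm's KSM profit in bytes: roughly the pages KSM saved
 * for the process times PAGE_SIZE, minus the metadata cost of its
 * rmap_items (see Documentation/admin-guide/mm/ksm.rst for the exact
 * formula).
 */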
long ksm_process_profit(struct mm_struct *);
#endif /* CONFIG_PROC_FS */

#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
				     struct list_head *to_kill, int force_early)
{
}
#endif

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */