/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
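/*
 * Backend for madvise(MADV_MERGEABLE)/madvise(MADV_UNMERGEABLE):
 * updates *vm_flags for @vma over [start, end) and registers the mm
 * with ksmd on first use.
 */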
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

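/*
 * Helpers behind prctl(PR_SET_MEMORY_MERGE): ksm_enable_merge_any()
 * opts the whole mm in by setting MMF_VM_MERGE_ANY,
 * ksm_disable_merge_any() opts it back out, and ksm_disable()
 * additionally unmerges any already-merged pages.  ksm_add_vma()
 * marks a newly created VMA mergeable when the mm-wide flag is set.
 */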
void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

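/* Internal hooks: callers normally use the ksm_fork()/ksm_exit() wrappers. */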
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern unsigned long ksm_zero_pages;

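/*
 * Called when a PTE is about to be cleared: if it maps a KSM-placed
 * zeropage, fix up the global and per-mm zero-page accounting.
 */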
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		ksm_zero_pages--;
		mm->ksm_zero_pages--;
	}
}

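/* On fork, the child inherits the parent's KSM registration. */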
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);

	return 0;
}

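/*
 * On execve, re-register with ksmd if the new mm inherited
 * MMF_VM_MERGE_ANY from the old one.
 */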
static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

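/* Unhook the mm from ksmd when it is torn down. */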
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);

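/*
 * rmap/migration/memory-failure hooks: rmap_walk_ksm() visits every
 * mapping of a KSM folio, folio_migrate_ksm() transfers KSM state to
 * the new folio during migration, collect_procs_ksm() gathers the
 * tasks mapping a poisoned KSM page, and ksm_process_profit()
 * reports the per-process saving shown in /proc/<pid>/ksm_stat.
 */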
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(struct folio *folio, struct page *page,
		struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *mm);

#else  /* !CONFIG_KSM */

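/* Stubs so that core mm code can call these unconditionally. */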
static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(struct folio *folio, struct page *page,
				     struct list_head *to_kill, int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */