/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
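
/*
 * Illustrative userspace sketch (not part of this header's API): a process
 * normally opts a memory range into merging with madvise(2) and
 * MADV_MERGEABLE, which reaches ksm_madvise() below.  Assumes CONFIG_KSM
 * and a running ksmd; error handling trimmed for brevity.
 *
 *	#include <sys/mman.h>
 *	#include <stddef.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 16 * 4096;
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		// Hint that identical pages in [buf, buf + len) may be merged.
 *		madvise(buf, len, MADV_MERGEABLE);
 *		return 0;
 *	}
 */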

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);

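/*
 * Illustrative sketch: ksm_enable_merge_any() is the per-process counterpart
 * of MADV_MERGEABLE, marking the whole mm with MMF_VM_MERGE_ANY so compatible
 * anonymous VMAs (current and future, via ksm_add_vma()) become mergeable
 * without per-range madvise() calls.  On kernels that wire this up through
 * prctl(PR_SET_MEMORY_MERGE), a process could opt in roughly like this
 * (assumes <sys/prctl.h> defines PR_SET_MEMORY_MERGE):
 *
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		// Opt the whole process into KSM merging.
 *		if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
 *			return 1;
 *		return 0;
 *	}
 */
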
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	int ret;

	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
		ret = __ksm_enter(mm);
		if (ret)
			return ret;
	}

	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
		set_bit(MMF_VM_MERGE_ANY, &mm->flags);

	return 0;
}
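
/*
 * Illustrative sketch of the caller side (simplified): the fork path invokes
 * ksm_fork() while duplicating the mm, so the child inherits the parent's
 * KSM state, both the per-mm registration (MMF_VM_MERGEABLE via
 * __ksm_enter()) and the process-wide MMF_VM_MERGE_ANY mode.
 *
 *	// Roughly, inside dup_mmap(mm, oldmm) in kernel/fork.c:
 *	retval = ksm_fork(mm, oldmm);
 *	if (retval)
 *		goto out;
 */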

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
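
/*
 * Illustrative sketch of the swap-in caller (simplified): the fault path
 * hands the swapcache page to ksm_might_need_to_copy() and continues with
 * whatever comes back, which may be the original page or a freshly
 * allocated copy; a NULL return signals that the copy could not be
 * allocated.  Exact error handling varies by kernel version.
 *
 *	// Roughly, in do_swap_page():
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;
 *		goto out_page;
 *	}
 */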

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#ifdef CONFIG_MEMORY_FAILURE
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early);
#endif
#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
				     struct list_head *to_kill, int force_early)
{
}
#endif

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */