xref: /linux/include/linux/ksm.h (revision f42647acc4eab1befa9e290691ed7a40f9a7d3cc)
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
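
/*
 * Illustrative sketch (not part of the API defined in this header):
 * userspace opts an anonymous mapping into merging with
 * madvise(MADV_MERGEABLE), which reaches ksm_madvise() below; ksmd then
 * scans such areas once /sys/kernel/mm/ksm/run is set to 1.  Hypothetical
 * userspace usage:
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 */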

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
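
/*
 * Sketch of how the helpers above fit together (an illustration, not code
 * from this header): the PAGE_MAPPING_ANON | PAGE_MAPPING_KSM bits in
 * page->mapping let rmap code pick the KSM-specific path, e.g. roughly:
 *
 *	if (PageKsm(page))
 *		referenced = page_referenced_ksm(page, memcg, vm_flags);
 *
 * while page_stable_node() recovers the stable tree node that KSM stored
 * with set_page_stable_node(), after the low flag bits are stripped off.
 */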

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = page_anon_vma(page);

	if (!anon_vma ||
	    (anon_vma == vma->anon_vma &&
	     page->index == linear_page_index(vma, address)))
		return page;

	return ksm_does_need_to_copy(page, vma, address);
}
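
/*
 * Illustrative caller (an assumption about mm/memory.c, shown only as a
 * sketch): do_swap_page() runs the swapped-in page through this check
 * before mapping it, and must be prepared to get back a freshly allocated
 * private copy rather than the original page:
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 *	if (!page)
 *		goto out;	// allocation of the copy failed
 */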

int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		  struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */