xref: /linux/include/linux/ksm.h (revision 3ca7b3c5b64d35fe02c35b5d44c2c58b49499fee)
1f8af4da3SHugh Dickins #ifndef __LINUX_KSM_H
2f8af4da3SHugh Dickins #define __LINUX_KSM_H
3f8af4da3SHugh Dickins /*
4f8af4da3SHugh Dickins  * Memory merging support.
5f8af4da3SHugh Dickins  *
6f8af4da3SHugh Dickins  * This code enables dynamic sharing of identical pages found in different
7f8af4da3SHugh Dickins  * memory areas, even if they are not shared by fork().
8f8af4da3SHugh Dickins  */
9f8af4da3SHugh Dickins 
10f8af4da3SHugh Dickins #include <linux/bitops.h>
11f8af4da3SHugh Dickins #include <linux/mm.h>
12f8af4da3SHugh Dickins #include <linux/sched.h>
139a840895SHugh Dickins #include <linux/vmstat.h>
14f8af4da3SHugh Dickins 
15f8af4da3SHugh Dickins #ifdef CONFIG_KSM
16f8af4da3SHugh Dickins int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
17f8af4da3SHugh Dickins 		unsigned long end, int advice, unsigned long *vm_flags);
18f8af4da3SHugh Dickins int __ksm_enter(struct mm_struct *mm);
191c2fb7a4SAndrea Arcangeli void __ksm_exit(struct mm_struct *mm);
20f8af4da3SHugh Dickins 
21f8af4da3SHugh Dickins static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
22f8af4da3SHugh Dickins {
23f8af4da3SHugh Dickins 	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
24f8af4da3SHugh Dickins 		return __ksm_enter(mm);
25f8af4da3SHugh Dickins 	return 0;
26f8af4da3SHugh Dickins }
27f8af4da3SHugh Dickins 
281c2fb7a4SAndrea Arcangeli static inline void ksm_exit(struct mm_struct *mm)
29f8af4da3SHugh Dickins {
30f8af4da3SHugh Dickins 	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
311c2fb7a4SAndrea Arcangeli 		__ksm_exit(mm);
32f8af4da3SHugh Dickins }
339a840895SHugh Dickins 
349a840895SHugh Dickins /*
359a840895SHugh Dickins  * A KSM page is one of those write-protected "shared pages" or "merged pages"
369a840895SHugh Dickins  * which KSM maps into multiple mms, wherever identical anonymous page content
379a840895SHugh Dickins  * is found in VM_MERGEABLE vmas.  It's a PageAnon page, with NULL anon_vma.
389a840895SHugh Dickins  */
399a840895SHugh Dickins static inline int PageKsm(struct page *page)
409a840895SHugh Dickins {
41*3ca7b3c5SHugh Dickins 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
42*3ca7b3c5SHugh Dickins 				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
439a840895SHugh Dickins }
449a840895SHugh Dickins 
459a840895SHugh Dickins /*
469a840895SHugh Dickins  * But we have to avoid the checking which page_add_anon_rmap() performs.
479a840895SHugh Dickins  */
489a840895SHugh Dickins static inline void page_add_ksm_rmap(struct page *page)
499a840895SHugh Dickins {
509a840895SHugh Dickins 	if (atomic_inc_and_test(&page->_mapcount)) {
51*3ca7b3c5SHugh Dickins 		page->mapping = (void *) (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
529a840895SHugh Dickins 		__inc_zone_page_state(page, NR_ANON_PAGES);
539a840895SHugh Dickins 	}
549a840895SHugh Dickins }
55f8af4da3SHugh Dickins #else  /* !CONFIG_KSM */
56f8af4da3SHugh Dickins 
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	/* KSM disabled: accept the advice as a successful no-op. */
	return 0;
}
62f8af4da3SHugh Dickins 
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* KSM disabled: fork never needs KSM registration. */
	return 0;
}
67f8af4da3SHugh Dickins 
static inline void ksm_exit(struct mm_struct *mm)
{
	/* KSM disabled: nothing to tear down at mm exit. */
}
719a840895SHugh Dickins 
static inline int PageKsm(struct page *page)
{
	/* KSM disabled: no page can be a KSM page. */
	return 0;
}
769a840895SHugh Dickins 
779a840895SHugh Dickins /* No stub required for page_add_ksm_rmap(page) */
78f8af4da3SHugh Dickins #endif /* !CONFIG_KSM */
79f8af4da3SHugh Dickins 
80f8af4da3SHugh Dickins #endif
81