// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#include "internal.h"

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)

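/*
 * The bitmap is accessed in chunks of one u64: bit b of chunk i tracks the
 * idle state of pfn i * BITMAP_CHUNK_BITS + b.
 */
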
/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory folio by pfn as described above.
 */
static struct folio *page_idle_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
	/*
	 * The folio could have been freed, reused or split while we took the
	 * reference; recheck that we still hold the folio backing this pfn
	 * and that it is still on an LRU list.
	 */
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}

static bool page_idle_clear_pte_refs_one(struct folio *folio,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/*
			 * For PTE-mapped THP, if one subpage is referenced,
			 * the whole THP is treated as referenced.
			 */
			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
				referenced = true;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		folio_clear_idle(folio);
		/*
		 * We cleared the referenced bit in a mapping to this folio.
		 * To avoid interference with page reclaim, mark it young so
		 * that folio_referenced() will return > 0.
		 */
		folio_set_young(folio);
	}
	/* Keep walking: clear the accessed bit in every mapping of the folio. */
	return true;
}

static void page_idle_clear_pte_refs(struct folio *folio)
{
	/*
	 * Since rwc.try_lock is unused, rwc is effectively immutable, so we
	 * can make it static to save some cycles and stack.
	 */
	static struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
		return;

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

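/*
 * Report, for each pfn in the requested range, whether the backing folio is
 * still idle. A folio that was referenced through a page table since it was
 * marked idle has its accessed bits cleared here and is reported as not
 * idle.
 */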
static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		folio = page_idle_get_folio(pfn);
		if (folio) {
			if (folio_test_idle(folio)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(folio);
				if (folio_test_idle(folio))
					*out |= 1ULL << bit;
			}
			folio_put(folio);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}

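/*
 * A hypothetical userspace sketch (not part of this file) of reading the
 * first chunk through the sysfs file backed by the function above,
 * /sys/kernel/mm/page_idle/bitmap. Offsets and sizes must be multiples of
 * BITMAP_CHUNK_SIZE, matching the checks in page_idle_bitmap_read():
 *
 *	uint64_t chunk;
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
 *
 *	if (fd >= 0 && pread(fd, &chunk, sizeof(chunk), 0) == sizeof(chunk))
 *		printf("pfn 0 idle: %d\n", (int)(chunk & 1));
 */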
static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			folio = page_idle_get_folio(pfn);
			if (folio) {
				page_idle_clear_pte_refs(folio);
				folio_set_idle(folio);
				folio_put(folio);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}

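/*
 * The matching write side (again a hypothetical userspace sketch): writing a
 * chunk with bits set marks the corresponding pfns idle, which also clears
 * the accessed bits in their page tables via page_idle_clear_pte_refs(). A
 * typical idle-tracking cycle is: write ones, let the workload run, then
 * read back and treat pfns whose bits are still set as unreferenced:
 *
 *	uint64_t ones = ~0ULL;
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
 *
 *	if (fd >= 0)
 *		pwrite(fd, &ones, sizeof(ones), 0);
 */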
static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, 0600,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

/* Registered under mm_kobj, i.e. /sys/kernel/mm/page_idle/. */
static const struct attribute_group page_idle_attr_group = {
	.bin_attrs = page_idle_bin_attrs,
	.name = "page_idle",
};

static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);