/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 *			  page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 *
 * The driver must call this function to properly allocate and enlist a new
 * balloon page before definitively removing the page from the guest system.
 * This function returns the address of the newly enqueued page, or NULL if
 * we fail to allocate a new page this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
					__GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!page)
		return NULL;

	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point.
	 */
	BUG_ON(!trylock_page(page));
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, page);
	__count_vm_event(BALLOON_INFLATE);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);

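/*
 * Illustrative sketch (not compiled): how a balloon driver's inflate
 * path might use balloon_page_enqueue(). 'struct my_balloon' and
 * my_tell_host() are hypothetical driver-side names; compare
 * fill_balloon() in drivers/virtio/virtio_balloon.c for a real user.
 */
#if 0
static unsigned int my_fill_balloon(struct my_balloon *vb, unsigned int num)
{
	unsigned int done;

	for (done = 0; done < num; done++) {
		struct page *page = balloon_page_enqueue(&vb->b_dev_info);

		/* allocation may fail this turn; the caller retries later */
		if (!page)
			break;
		my_tell_host(vb, page);	/* hypothetical host notification */
	}
	return done;
}
#endif
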
/*
 * balloon_page_dequeue - removes a page from the balloon's page list and
 *			  returns its address to allow the driver to release
 *			  the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from
 *
 * The driver must call this function to properly de-allocate a previously
 * enlisted balloon page before definitively releasing it back to the guest
 * system. This function returns the address of the freshly dequeued page,
 * or NULL if the balloon's page list is found temporarily empty because
 * compaction has isolated its pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * to establishing additional references and preparing the
		 * 'page' to be released by the balloon driver.
		 */
		if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
			if (!PagePrivate(page)) {
				/* raced with isolation */
				unlock_page(page);
				continue;
			}
#endif
			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then
		 * something went wrong and some balloon pages were lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);

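/*
 * Illustrative sketch (not compiled): a deflate path built on
 * balloon_page_dequeue(). A NULL return only means the list is
 * temporarily empty while compaction holds isolated pages, so the
 * caller stops and retries later rather than spinning. 'struct
 * my_balloon' and my_tell_host() are hypothetical; compare
 * leak_balloon() in drivers/virtio/virtio_balloon.c.
 */
#if 0
static unsigned int my_leak_balloon(struct my_balloon *vb, unsigned int num)
{
	unsigned int done;

	for (done = 0; done < num; done++) {
		struct page *page = balloon_page_dequeue(&vb->b_dev_info);

		if (!page)
			break;
		my_tell_host(vb, page);	/* hypothetical host notification */
		put_page(page);		/* hand the page back to the system */
	}
	return done;
}
#endif
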
#ifdef CONFIG_BALLOON_COMPACTION

static inline void __isolate_balloon_page(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	ClearPagePrivate(page);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline void __putback_balloon_page(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	SetPagePrivate(page);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* __isolate_lru_page() counterpart for a ballooned page */
bool balloon_page_isolate(struct page *page)
{
	/*
	 * Avoid burning cycles with pages that are still under
	 * __free_pages(), or that just got freed under us.
	 *
	 * In case we 'win' a race for a balloon page being freed under us
	 * and raise its refcount, preventing __free_pages() from doing its
	 * job, the put_page() at the end of this block will take care of
	 * releasing the page, thus avoiding a nasty leak.
	 */
	if (likely(get_page_unless_zero(page))) {
		/*
		 * As balloon pages are not isolated from LRU lists, concurrent
		 * compaction threads can race against page migration functions
		 * as well as race against the balloon driver releasing a page.
		 *
		 * In order to avoid having an already isolated balloon page
		 * being (wrongly) re-isolated while it is under migration,
		 * or to avoid attempting to isolate pages being released by
		 * the balloon driver, let's make sure we hold the page lock
		 * before proceeding with the balloon page isolation steps.
		 */
		if (likely(trylock_page(page))) {
			/*
			 * A ballooned page, by default, has PagePrivate set.
			 * Prevent concurrent compaction threads from isolating
			 * an already isolated balloon page by clearing it.
			 */
			if (balloon_page_movable(page)) {
				__isolate_balloon_page(page);
				unlock_page(page);
				return true;
			}
			unlock_page(page);
		}
		put_page(page);
	}
	return false;
}

/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
	/*
	 * 'lock_page()' stabilizes the page and prevents races against
	 * concurrent isolation threads attempting to re-isolate it.
	 */
	lock_page(page);

	if (__is_movable_balloon_page(page)) {
		__putback_balloon_page(page);
		/* drop the extra ref count taken for page isolation */
		put_page(page);
	} else {
		WARN_ON(1);
		dump_page(page, "not movable balloon page");
	}
	unlock_page(page);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct page *newpage,
			 struct page *page, enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);
	int rc = -EAGAIN;

	/*
	 * Block others from accessing the 'newpage' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'newpage' at this point.
	 */
	BUG_ON(!trylock_page(newpage));

	if (WARN_ON(!__is_movable_balloon_page(page))) {
		dump_page(page, "not movable balloon page");
		unlock_page(newpage);
		return rc;
	}

	if (balloon && balloon->migratepage)
		rc = balloon->migratepage(balloon, newpage, page, mode);

	unlock_page(newpage);
	return rc;
}
#endif /* CONFIG_BALLOON_COMPACTION */
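
/*
 * Illustrative sketch (not compiled): how a driver might wire up
 * compaction support. balloon_devinfo_init() and the ->migratepage
 * hook are the real interface from <linux/balloon_compaction.h>; the
 * my_* names are hypothetical, loosely modeled on
 * virtballoon_migratepage() in drivers/virtio/virtio_balloon.c.
 */
#if 0
static int my_balloon_migratepage(struct balloon_dev_info *b_dev_info,
				  struct page *newpage, struct page *page,
				  enum migrate_mode mode)
{
	struct my_balloon *vb = container_of(b_dev_info, struct my_balloon,
					     b_dev_info);
	unsigned long flags;

	/*
	 * Both pages arrive locked. Enlist 'newpage' and drop the
	 * isolated-page count taken by balloon_page_isolate().
	 */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, newpage);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	my_tell_host_inflate(vb, newpage);	/* hypothetical host notify */

	/* Deflate the old 'page' and drop the isolation reference. */
	balloon_page_delete(page);
	my_tell_host_deflate(vb, page);		/* hypothetical host notify */
	put_page(page);

	return MIGRATEPAGE_SUCCESS;
}

static void my_balloon_init(struct my_balloon *vb)
{
	balloon_devinfo_init(&vb->b_dev_info);
	vb->b_dev_info.migratepage = my_balloon_migratepage;
}
#endif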