// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
				     struct page *page)
{
	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point. If we are not, then
	 * memory corruption is possible and we should stop execution.
	 */
	BUG_ON(!trylock_page(page));
	balloon_page_insert(b_dev_info, page);
	unlock_page(page);
	__count_vm_event(BALLOON_INFLATE);
	inc_node_page_state(page, NR_BALLOON_PAGES);
}

/**
 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
 *				 list.
 * @b_dev_info: balloon device descriptor where we will insert the new pages
 * @pages: pages to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call this function to properly enqueue balloon pages before
 * definitively removing them from the guest system.
 *
 * Return: number of pages that were enqueued.
 */
size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, pages, lru) {
		list_del(&page->lru);
		balloon_page_enqueue_one(b_dev_info, page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);

/**
 * balloon_page_list_dequeue() - removes pages from balloon's page list and
 *				 returns a list of the pages.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 * @pages: pointer to the list of pages that will be returned to the caller.
 * @n_req_pages: number of requested pages.
 *
 * Driver must call this function to properly de-allocate previously enlisted
 * balloon pages before definitively releasing them back to the guest system.
 * This function tries to remove @n_req_pages from the ballooned pages and
 * return them to the caller in the @pages list.
 *
 * Note that this function may fail to dequeue some pages even if the balloon
 * isn't empty - since the page list can be temporarily empty due to compaction
 * of isolated pages.
 *
 * Return: number of pages that were added to the @pages list.
 */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages, size_t n_req_pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		if (n_pages == n_req_pages)
			break;

		/*
		 * Block others from accessing the 'page' while we get around to
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 */
		if (!trylock_page(page))
			continue;

		if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) &&
		    PageIsolated(page)) {
			/* raced with isolation */
			unlock_page(page);
			continue;
		}
		balloon_page_delete(page);
		__count_vm_event(BALLOON_DEFLATE);
		list_add(&page->lru, pages);
		unlock_page(page);
		dec_node_page_state(page, NR_BALLOON_PAGES);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
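/*
 * Illustrative sketch only (not part of this file's interface): roughly how
 * a hypothetical driver could build its inflate/deflate batch paths on top
 * of balloon_page_alloc(), balloon_page_push() and the list enqueue/dequeue
 * helpers above.  The example_* names are assumptions, and the host
 * notification a real driver must perform is deliberately omitted.
 */
static inline size_t example_inflate(struct balloon_dev_info *b_dev_info,
				     size_t nr_pages)
{
	LIST_HEAD(pages);
	size_t i;

	/* Allocate the pages first, staging them on a private list. */
	for (i = 0; i < nr_pages; i++) {
		struct page *page = balloon_page_alloc();

		if (!page)
			break;
		balloon_page_push(&pages, page);
	}

	/* A real driver would tell its host about these pages here. */

	/* Hand the whole batch to the balloon in one locked pass. */
	return balloon_page_list_enqueue(b_dev_info, &pages);
}

static inline size_t example_deflate(struct balloon_dev_info *b_dev_info,
				     size_t nr_pages)
{
	struct page *page, *tmp;
	LIST_HEAD(pages);
	size_t freed;

	/* May return fewer than nr_pages if pages are isolated for compaction. */
	freed = balloon_page_list_dequeue(b_dev_info, &pages, nr_pages);

	/* A real driver would tell its host before releasing the pages. */
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
	return freed;
}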
/*
 * balloon_page_alloc - allocates a new page for insertion into the balloon
 *			page list.
 *
 * Driver must call this function to properly allocate a new balloon page.
 * Driver must call balloon_page_enqueue before definitively removing the page
 * from the guest system.
 *
 * Return: struct page for the allocated page or NULL on allocation failure.
 */
struct page *balloon_page_alloc(void)
{
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				       __GFP_NOMEMALLOC | __GFP_NORETRY |
				       __GFP_NOWARN);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 *
 * @b_dev_info: balloon device descriptor where we will insert a new page
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Drivers must call this function to properly enqueue a newly allocated
 * balloon page before definitively removing the page from the guest system.
 *
 * Drivers must not call balloon_page_enqueue on pages that have been pushed to
 * a list with balloon_page_push before removing them with balloon_page_pop. To
 * enqueue a list of pages, use balloon_page_list_enqueue instead.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_enqueue_one(b_dev_info, page);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);

/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call this function to properly dequeue a previously enqueued page
 * before definitively releasing it back to the guest system.
 *
 * Caller must perform its own accounting to ensure that this
 * function is called only if some pages are actually enqueued.
 *
 * Note that this function may fail to dequeue some pages even if there are
 * some enqueued pages - since the page list can be temporarily empty due to
 * the compaction of isolated pages.
 *
 * TODO: remove the caller accounting requirements, and allow caller to wait
 * until all pages can be dequeued.
 *
 * Return: struct page for the dequeued page, or NULL if no page was dequeued.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	LIST_HEAD(pages);
	int n_pages;

	n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);

	if (n_pages != 1) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then something
		 * has gone wrong and some balloon pages were lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		return NULL;
	}
	return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
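/*
 * Illustrative sketch only: the single-page variant of the example above,
 * as a hypothetical driver might add and remove one page at a time.  The
 * example_* names are assumptions; host notification is again omitted.
 */
static inline int example_inflate_one(struct balloon_dev_info *b_dev_info)
{
	struct page *page = balloon_page_alloc();

	if (!page)
		return -ENOMEM;

	/* A real driver would tell its host about 'page' before enqueueing. */
	balloon_page_enqueue(b_dev_info, page);
	return 0;
}

static inline int example_deflate_one(struct balloon_dev_info *b_dev_info)
{
	struct page *page;

	/*
	 * The caller's own accounting guarantees at least one page is
	 * enqueued; NULL here only means the list is temporarily empty
	 * because pages are isolated for compaction.
	 */
	page = balloon_page_dequeue(b_dev_info);
	if (!page)
		return -EAGAIN;

	/* A real driver would tell its host before freeing 'page'. */
	__free_page(page);
	return 0;
}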

#ifdef CONFIG_BALLOON_COMPACTION

static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return true;
}

static void balloon_page_putback(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* move_to_new_page() counterpart for a ballooned page */
static int balloon_page_migrate(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	return balloon->migratepage(balloon, newpage, page, mode);
}

const struct movable_operations balloon_mops = {
	.migrate_page = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_mops);

#endif /* CONFIG_BALLOON_COMPACTION */
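
/*
 * Illustrative sketch only (not part of this file): how a hypothetical driver
 * might wire up its balloon_dev_info so its pages take part in compaction,
 * which only matters when CONFIG_BALLOON_COMPACTION is enabled.  The
 * example_* names are assumptions; example_migratepage() condenses the steps
 * a real ->migratepage callback performs and leaves out the host notification
 * for both pages that a real driver must add.
 */
static inline int example_migratepage(struct balloon_dev_info *b_dev_info,
				      struct page *newpage, struct page *page,
				      enum migrate_mode mode)
{
	unsigned long flags;

	get_page(newpage);		/* balloon reference on the new page */

	/* Adopt 'newpage' in place of the isolated 'page'. */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, newpage);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	/* Drop the old page's balloon state and reference. */
	balloon_page_delete(page);
	put_page(page);			/* balloon reference */

	return MIGRATEPAGE_SUCCESS;
}

static inline void example_balloon_init(struct balloon_dev_info *b_dev_info)
{
	balloon_devinfo_init(b_dev_info);
	b_dev_info->migratepage = example_migratepage;
}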