// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
				     struct page *page)
{
	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point. If we are not, then
	 * memory corruption is possible and we should stop execution.
	 */
	BUG_ON(!trylock_page(page));
	balloon_page_insert(b_dev_info, page);
	unlock_page(page);
	__count_vm_event(BALLOON_INFLATE);
	inc_node_page_state(page, NR_BALLOON_PAGES);
}

/**
 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
 *				 list.
 * @b_dev_info: balloon device descriptor where we will insert a new page to
 * @pages: pages to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call this function to properly enqueue balloon pages before
 * definitively removing them from the guest system.
 *
 * Return: number of pages that were enqueued.
 */
size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, pages, lru) {
		list_del(&page->lru);
		balloon_page_enqueue_one(b_dev_info, page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
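
/*
 * Example: a driver's batched inflate path (informal sketch, not built as
 * part of this file). The driver-side names "my_balloon" and
 * "my_tell_host_inflate()" are hypothetical; how the hypervisor is told
 * about the pages is entirely driver-specific. Pages are collected on a
 * private list with balloon_page_push() and only become part of the
 * balloon once balloon_page_list_enqueue() is called.
 *
 *	static size_t my_balloon_inflate(struct balloon_dev_info *my_balloon,
 *					 size_t nr_pages)
 *	{
 *		LIST_HEAD(pages);
 *		size_t i;
 *
 *		for (i = 0; i < nr_pages; i++) {
 *			struct page *page = balloon_page_alloc();
 *
 *			if (!page)
 *				break;
 *			balloon_page_push(&pages, page);
 *		}
 *
 *		my_tell_host_inflate(my_balloon, &pages);
 *
 *		return balloon_page_list_enqueue(my_balloon, &pages);
 *	}
 */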

/**
 * balloon_page_list_dequeue() - removes pages from balloon's page list and
 *				 returns a list of the pages.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 * @pages: pointer to the list of pages that would be returned to the caller.
 * @n_req_pages: number of requested pages.
 *
 * Driver must call this function to properly de-allocate previously enlisted
 * balloon pages before definitively releasing them back to the guest system.
 * This function tries to remove @n_req_pages from the ballooned pages and
 * return them to the caller in the @pages list.
 *
 * Note that this function may fail to dequeue some pages even if the balloon
 * isn't empty, since the page list can be temporarily empty due to the
 * compaction of isolated pages.
 *
 * Return: number of pages that were added to the @pages list.
 */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages, size_t n_req_pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		if (n_pages == n_req_pages)
			break;

		/*
		 * Block others from accessing the 'page' while we get around to
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 */
		if (!trylock_page(page))
			continue;

		list_del(&page->lru);
		balloon_page_finalize(page);
		__count_vm_event(BALLOON_DEFLATE);
		list_add(&page->lru, pages);
		unlock_page(page);
		dec_node_page_state(page, NR_BALLOON_PAGES);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
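
/*
 * Example: a driver's batched deflate path (informal sketch). As above,
 * "my_balloon" and "my_tell_host_deflate()" are hypothetical driver-side
 * names. The host must be told to give the pages back before the guest
 * frees and reuses them, and balloon_page_list_dequeue() may legitimately
 * return fewer pages than requested while some pages are isolated for
 * compaction.
 *
 *	static size_t my_balloon_deflate(struct balloon_dev_info *my_balloon,
 *					 size_t nr_pages)
 *	{
 *		LIST_HEAD(pages);
 *		struct page *page, *tmp;
 *		size_t n_pages;
 *
 *		n_pages = balloon_page_list_dequeue(my_balloon, &pages, nr_pages);
 *
 *		my_tell_host_deflate(my_balloon, &pages);
 *
 *		list_for_each_entry_safe(page, tmp, &pages, lru) {
 *			list_del(&page->lru);
 *			__free_page(page);
 *		}
 *		return n_pages;
 *	}
 */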

/*
 * balloon_page_alloc - allocates a new page for insertion into the balloon
 *			page list.
 *
 * Driver must call this function to properly allocate a new balloon page.
 * Driver must call balloon_page_enqueue before definitively removing the page
 * from the guest system.
 *
 * Return: struct page for the allocated page or NULL on allocation failure.
 */
struct page *balloon_page_alloc(void)
{
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				       __GFP_NOMEMALLOC | __GFP_NORETRY |
				       __GFP_NOWARN);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 *
 * @b_dev_info: balloon device descriptor where we will insert a new page
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Drivers must call this function to properly enqueue a newly allocated
 * balloon page before definitively removing the page from the guest system.
 *
 * Drivers must not call balloon_page_enqueue on pages that have been pushed to
 * a list with balloon_page_push before removing them with balloon_page_pop. To
 * enqueue a list of pages, use balloon_page_list_enqueue instead.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_enqueue_one(b_dev_info, page);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);

/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call this function to properly dequeue a previously enqueued
 * page before definitively releasing it back to the guest system.
 *
 * Caller must perform its own accounting to ensure that this
 * function is called only if some pages are actually enqueued.
 *
 * Note that this function may fail to dequeue a page even if there are
 * enqueued pages, since the page list can be temporarily empty due to
 * the compaction of isolated pages.
 *
 * TODO: remove the caller accounting requirements, and allow caller to wait
 * until all pages can be dequeued.
 *
 * Return: struct page for the dequeued page, or NULL if no page was dequeued.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	LIST_HEAD(pages);
	int n_pages;

	n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);

	if (n_pages != 1) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then something
		 * went wrong and some balloon pages were lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		return NULL;
	}
	return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);

#ifdef CONFIG_BALLOON_COMPACTION

static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	if (!b_dev_info)
		return false;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return true;
}

static void balloon_page_putback(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	/* Isolated balloon pages cannot get deflated. */
	if (WARN_ON_ONCE(!b_dev_info))
		return;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* move_to_new_page() counterpart for a ballooned page */
static int balloon_page_migrate(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	/* Isolated balloon pages cannot get deflated. */
	if (WARN_ON_ONCE(!balloon))
		return -EAGAIN;

	return balloon->migratepage(balloon, newpage, page, mode);
}

const struct movable_operations balloon_mops = {
	.migrate_page = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};

static int __init balloon_init(void)
{
	return set_movable_ops(&balloon_mops, PGTY_offline);
}
core_initcall(balloon_init);

#endif /* CONFIG_BALLOON_COMPACTION */
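
/*
 * Example: the general shape of a driver's ->migratepage() callback, as
 * invoked by balloon_page_migrate() above (informal sketch, loosely modelled
 * on the pattern used by in-tree balloon drivers; the "my_tell_host_*()"
 * helpers are hypothetical). Both pages arrive locked. The callback takes
 * the balloon's reference on @newpage, inserts it into the balloon while
 * dropping the isolated-page count, releases the old @page, and informs the
 * hypervisor about both changes.
 *
 *	static int my_balloon_migratepage(struct balloon_dev_info *b_dev_info,
 *					  struct page *newpage, struct page *page,
 *					  enum migrate_mode mode)
 *	{
 *		unsigned long flags;
 *
 *		get_page(newpage);
 *
 *		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 *		balloon_page_insert(b_dev_info, newpage);
 *		b_dev_info->isolated_pages--;
 *		__count_vm_event(BALLOON_MIGRATE);
 *		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 *		my_tell_host_inflate_one(b_dev_info, newpage);
 *
 *		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 *		balloon_page_finalize(page);
 *		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 *		my_tell_host_deflate_one(b_dev_info, page);
 *
 *		put_page(page);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 */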