xref: /linux/mm/swap_state.c (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58)
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= RW_LOCK_UNLOCKED,
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

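/*
 * Unlike a file's mapping, swapper_space is keyed by swp_entry_t.val
 * rather than by file offset.  A minimal illustrative sketch of a
 * lookup (the type/offset values here are made up):
 *
 *	swp_entry_t entry = swp_entry(1, 0x1234);
 *	struct page *page = find_get_page(&swapper_space, entry.val);
 *
 * lookup_swap_cache() below is the real interface for this.
 */
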
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
	unsigned long noent_race;
	unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total,
		swap_cache_info.noent_race, swap_cache_info.exist_race);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

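/*
 * The shifts above convert page counts to kilobytes: PAGE_SHIFT is
 * log2(PAGE_SIZE), so shifting by (PAGE_SHIFT - 10) multiplies by
 * PAGE_SIZE / 1024.  A worked example with 4K pages (PAGE_SHIFT == 12):
 *
 *	nr_swap_pages == 25600
 *	25600 << (12 - 10) == 102400kB (i.e. 100MB of free swap)
 */
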
/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}

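/*
 * The preload/lock/insert sequence above is the standard pattern for
 * inserting into a radix tree when nodes cannot be allocated under the
 * tree lock: radix_tree_preload() allocates nodes up front (and
 * disables preemption on success), so the subsequent
 * radix_tree_insert() will not fail for lack of memory while the lock
 * is held.  A minimal sketch of the general shape, with tree, lock,
 * index and item as placeholders:
 *
 *	if (!radix_tree_preload(gfp_mask)) {
 *		write_lock_irq(&lock);
 *		error = radix_tree_insert(&tree, index, item);
 *		write_unlock_irq(&lock);
 *		radix_tree_preload_end();
 *	}
 */
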
static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	if (!swap_duplicate(entry)) {
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU; we don't run lru_cache_add here.
	 */
	if (error) {
		swap_free(entry);
		if (error == -EEXIST)
			INC_CACHE_INFO(exist_race);
		return error;
	}
	INC_CACHE_INFO(add_total);
	return 0;
}

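/*
 * Callers of add_to_swap_cache() must be prepared for four outcomes:
 * 0 on success, -ENOENT if the swap entry was freed before
 * swap_duplicate() could pin it, -EEXIST if another page already owns
 * the entry in the swap cache, and -ENOMEM if radix-tree preloading
 * failed.  A rough sketch of the caller-side handling (see
 * read_swap_cache_async() below for the real thing):
 *
 *	err = add_to_swap_cache(page, entry);
 *	if (err == -EEXIST)
 *		... lost a race: repeat the swap-cache lookup ...
 *	else if (err)
 *		... entry stale or memory tight: give up ...
 */
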
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	pagecache_acct(-1);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: allocation mask for the radix-tree node
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty.
		 */
		err = __add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			INC_CACHE_INFO(add_total);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			INC_CACHE_INFO(exist_race);
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}

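/*
 * A minimal sketch of the expected caller, modelled on the vmscan
 * reclaim path (shrink_list() is the real user; labels abridged):
 *
 *	if (PageAnon(page) && !PageSwapCache(page))
 *		if (!add_to_swap(page, GFP_ATOMIC))
 *			goto activate_locked;	... no swap space left ...
 *
 * The page must be locked throughout, and on success it is left dirty
 * in the swap cache so that pageout() will later write it to disk.
 */
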
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page onto the free list,
 * since the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
		INC_CACHE_INFO(add_total);
	} else if (err == -EEXIST)
		INC_CACHE_INFO(exist_race);
	return err;
}

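/*
 * The reference swizzle above is worth spelling out; on success the
 * page ends with the same refcount it started with, roughly:
 *
 *	__add_to_swap_cache()	 takes one page ref for the swap cache
 *	remove_from_page_cache() unhooks the page from its old mapping
 *	page_cache_release()	 drops the old page-cache reference
 *	swap_duplicate()	 adds the swap count the cache now holds
 *
 * so the page moves from the file's radix tree to swapper_space
 * without ever becoming freeable in between.
 */
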
/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * remove_exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

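/*
 * The loop above works in PAGEVEC_SIZE batches so that release_pages()
 * takes and drops the zone LRU lock once per chunk rather than once
 * per page.  With PAGEVEC_SIZE of 14, a worked example for nr == 40
 * proceeds as batches of 14 + 14 + 12 pages.
 */
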
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented; we rely on the kernel
 * lock making page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

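/*
 * Together with find_success, the find_total counter above yields the
 * swap-cache hit rate reported by show_swap_cache_info(): for example,
 * "find 150/200" means three quarters of lookups were satisfied from
 * the cache.
 */
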
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-ENOENT) if swap entry has been freed since
		 * our caller observed it.  May fail (-EEXIST) if there
		 * is already a page associated with this entry in the
		 * swap cache: added by a racing read_swap_cache_async,
		 * or by try_to_swap_out (or shmem_writepage) re-using
		 * the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		err = add_to_swap_cache(new_page, entry);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
	} while (err != -ENOENT && err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
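
/*
 * A hedged sketch of the classic caller, modelled on the do_swap_page()
 * fault path (simplified; the real path also does readahead and pte
 * revalidation):
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = read_swap_cache_async(entry, vma, address);
 *	if (!page)
 *		... entry was freed or we are out of memory ...
 *
 * A page freshly read here is returned locked with I/O in flight, so
 * callers lock_page() and check PageUptodate() before touching the
 * contents.
 */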