/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= RW_LOCK_UNLOCKED,
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
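
/*
 * Illustrative note (not in the original source): pages in the swap
 * cache are indexed in swapper_space.page_tree directly by their
 * swp_entry_t value, so a swap-cache probe is just a page-cache
 * lookup with entry.val as the offset.  A minimal sketch, assuming a
 * valid swp_entry_t in hand:
 *
 *	struct page *page = find_get_page(&swapper_space, entry.val);
 *	if (page) {
 *		... the entry is cached; drop the reference when done ...
 *		page_cache_release(page);
 *	}
 */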

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
	unsigned long noent_race;
	unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total,
		swap_cache_info.noent_race, swap_cache_info.exist_race);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
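
/*
 * Illustrative note (not in the original source): the two swap totals
 * are printed in kilobytes, hence the shift by (PAGE_SHIFT - 10).
 * With 4K pages the output looks roughly like the lines below; the
 * figures are made up for illustration only:
 *
 *	Swap cache: add 1024, delete 1000, find 520/800, race 1+2
 *	Free swap  = 1048572kB
 *	Total swap = 1048572kB
 */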

/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}

static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	if (!swap_duplicate(entry)) {
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
	 */
	if (error) {
		swap_free(entry);
		if (error == -EEXIST)
			INC_CACHE_INFO(exist_race);
		return error;
	}
	INC_CACHE_INFO(add_total);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.  The caller must
 * hold swapper_space.tree_lock.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	pagecache_acct(-1);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = __add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			INC_CACHE_INFO(add_total);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			INC_CACHE_INFO(exist_race);
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
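
/*
 * Illustrative note (not in the original source): the main caller of
 * add_to_swap() is vmscan's shrink_list(), which gives an anonymous
 * page a swap entry before trying to unmap and write it back.  A
 * rough, simplified sketch of that call site, not the exact code:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page, GFP_ATOMIC))
 *			goto activate_locked;
 *	}
 *
 * A zero return means no swap slot could be reserved, so the page is
 * kept in memory and reactivated.
 */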

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list:
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
		INC_CACHE_INFO(add_total);
	} else if (err == -EEXIST)
		INC_CACHE_INFO(exist_race);
	return err;
}
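
/*
 * Illustrative note (not in the original source): shmem_writepage()
 * uses this to move a tmpfs page from its file mapping into the swap
 * cache.  A rough, simplified sketch of the pattern, not the exact
 * code:
 *
 *	swp_entry_t swap = get_swap_page();
 *	if (swap.val && move_to_swap_cache(page, swap) == 0) {
 *		... record swap in the shmem inode's index ...
 *		unlock_page(page);
 *		return 0;
 *	}
 */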

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}
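
/*
 * Illustrative note (not in the original source): shmem_getpage() and
 * shmem_unuse() use this to pull a page back out of swap and into the
 * tmpfs file's page cache.  A rough sketch of the pattern, not the
 * exact code:
 *
 *	err = move_from_swap_cache(page, idx, mapping);
 *	if (!err) {
 *		... forget the swap entry in the shmem inode's index ...
 *	}
 */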

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * remove_exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
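
/*
 * Illustrative note (not in the original source): this batched form is
 * used on the munmap/exit path, where the mmu_gather code collects the
 * pages it has unmapped and hands them over in one go.  A rough sketch
 * of the pattern from asm-generic/tlb.h, not the exact code:
 *
 *	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 *	tlb->nr = 0;
 */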

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
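
/*
 * Illustrative note (not in the original source): the page fault path
 * pairs this with read_swap_cache_async() below.  A rough sketch of
 * do_swap_page()'s use of the two, not the exact code:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page) {
 *		swapin_readahead(entry, address, vma);
 *		page = read_swap_cache_async(entry, vma, address);
 *		if (!page)
 *			... entry was freed, or we are out of memory ...
 *	}
 */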

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-ENOENT) if swap entry has been freed since
		 * our caller observed it.  May fail (-EEXIST) if there
		 * is already a page associated with this entry in the
		 * swap cache: added by a racing read_swap_cache_async,
		 * or by try_to_swap_out (or shmem_writepage) re-using
		 * the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		err = add_to_swap_cache(new_page, entry);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
	} while (err != -ENOENT && err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
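
/*
 * Illustrative note (not in the original source): on success the new
 * page comes back locked with the read already in flight, so a caller
 * that needs the contents must wait for the I/O itself.  A rough
 * sketch of the generic pattern, not the exact code of any caller:
 *
 *	page = read_swap_cache_async(entry, vma, addr);
 *	if (page) {
 *		wait_on_page_locked(page);
 *		if (!PageUptodate(page))
 *			... the read failed ...
 *	}
 */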
367