1 /*
2  *	linux/mm/filemap.c
3  *
4  * Copyright (C) 1994-1999  Linus Torvalds
5  */
6 
7 /*
8  * This file handles the generic file mmap semantics used by
9  * most "normal" filesystems (but you don't /have/ to use this:
10  * the NFS filesystem used to do this differently, for example)
11  */
12 #include <linux/export.h>
13 #include <linux/compiler.h>
14 #include <linux/fs.h>
15 #include <linux/uaccess.h>
16 #include <linux/aio.h>
17 #include <linux/capability.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/gfp.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/mman.h>
23 #include <linux/pagemap.h>
24 #include <linux/file.h>
25 #include <linux/uio.h>
26 #include <linux/hash.h>
27 #include <linux/writeback.h>
28 #include <linux/backing-dev.h>
29 #include <linux/pagevec.h>
30 #include <linux/blkdev.h>
31 #include <linux/security.h>
32 #include <linux/cpuset.h>
33 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
34 #include <linux/memcontrol.h>
35 #include <linux/cleancache.h>
36 #include <linux/rmap.h>
37 #include "internal.h"
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/filemap.h>
41 
42 /*
43  * FIXME: remove all knowledge of the buffer layer from the core VM
44  */
45 #include <linux/buffer_head.h> /* for try_to_free_buffers */
46 
47 #include <asm/mman.h>
48 
49 /*
50  * Shared mappings implemented 30.11.1994. It's not fully working yet,
51  * though.
52  *
53  * Shared mappings now work. 15.8.1995  Bruno.
54  *
55  * finished 'unifying' the page and buffer cache and SMP-threaded the
56  * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
57  *
58  * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
59  */
60 
61 /*
62  * Lock ordering:
63  *
64  *  ->i_mmap_mutex		(truncate_pagecache)
65  *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
66  *      ->swap_lock		(exclusive_swap_page, others)
67  *        ->mapping->tree_lock
68  *
69  *  ->i_mutex
70  *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
71  *
72  *  ->mmap_sem
73  *    ->i_mmap_mutex
74  *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
75  *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
76  *
77  *  ->mmap_sem
78  *    ->lock_page		(access_process_vm)
79  *
80  *  ->i_mutex			(generic_perform_write)
81  *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
82  *
83  *  bdi->wb.list_lock
84  *    sb_lock			(fs/fs-writeback.c)
85  *    ->mapping->tree_lock	(__sync_single_inode)
86  *
87  *  ->i_mmap_mutex
88  *    ->anon_vma.lock		(vma_adjust)
89  *
90  *  ->anon_vma.lock
91  *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
92  *
93  *  ->page_table_lock or pte_lock
94  *    ->swap_lock		(try_to_unmap_one)
95  *    ->private_lock		(try_to_unmap_one)
96  *    ->tree_lock		(try_to_unmap_one)
97  *    ->zone.lru_lock		(follow_page->mark_page_accessed)
98  *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
99  *    ->private_lock		(page_remove_rmap->set_page_dirty)
100  *    ->tree_lock		(page_remove_rmap->set_page_dirty)
101  *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
102  *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
103  *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
104  *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
105  *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
106  *
107  * ->i_mmap_mutex
108  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
109  */
110 
111 static void page_cache_tree_delete(struct address_space *mapping,
112 				   struct page *page, void *shadow)
113 {
114 	struct radix_tree_node *node;
115 	unsigned long index;
116 	unsigned int offset;
117 	unsigned int tag;
118 	void **slot;
119 
120 	VM_BUG_ON(!PageLocked(page));
121 
122 	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
123 
124 	if (shadow) {
125 		mapping->nrshadows++;
126 		/*
127 		 * Make sure the nrshadows update is committed before
128 		 * the nrpages update so that final truncate racing
129 		 * with reclaim does not see both counters 0 at the
130 		 * same time and miss a shadow entry.
131 		 */
132 		smp_wmb();
133 	}
134 	mapping->nrpages--;
135 
136 	if (!node) {
137 		/* Clear direct pointer tags in root node */
138 		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
139 		radix_tree_replace_slot(slot, shadow);
140 		return;
141 	}
142 
143 	/* Clear tree tags for the removed page */
144 	index = page->index;
145 	offset = index & RADIX_TREE_MAP_MASK;
146 	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
147 		if (test_bit(offset, node->tags[tag]))
148 			radix_tree_tag_clear(&mapping->page_tree, index, tag);
149 	}
150 
151 	/* Delete page, swap shadow entry */
152 	radix_tree_replace_slot(slot, shadow);
153 	workingset_node_pages_dec(node);
154 	if (shadow)
155 		workingset_node_shadows_inc(node);
156 	else
157 		if (__radix_tree_delete_node(&mapping->page_tree, node))
158 			return;
159 
160 	/*
161 	 * Track node that only contains shadow entries.
162 	 *
163 	 * Avoid acquiring the list_lru lock if already tracked.  The
164 	 * list_empty() test is safe as node->private_list is
165 	 * protected by mapping->tree_lock.
166 	 */
167 	if (!workingset_node_pages(node) &&
168 	    list_empty(&node->private_list)) {
169 		node->private_data = mapping;
170 		list_lru_add(&workingset_shadow_nodes, &node->private_list);
171 	}
172 }
173 
174 /*
175  * Delete a page from the page cache and free it. Caller has to make
176  * sure the page is locked and that nobody else uses it - or that usage
177  * is safe.  The caller must hold the mapping's tree_lock.
178  */
179 void __delete_from_page_cache(struct page *page, void *shadow)
180 {
181 	struct address_space *mapping = page->mapping;
182 
183 	trace_mm_filemap_delete_from_page_cache(page);
184 	/*
185 	 * If we're uptodate, flush out into the cleancache; otherwise
186 	 * invalidate any existing cleancache entries.  We can't leave
187 	 * stale data around in the cleancache once our page is gone.
188 	 */
189 	if (PageUptodate(page) && PageMappedToDisk(page))
190 		cleancache_put_page(page);
191 	else
192 		cleancache_invalidate_page(mapping, page);
193 
194 	page_cache_tree_delete(mapping, page, shadow);
195 
196 	page->mapping = NULL;
197 	/* Leave page->index set: truncation lookup relies upon it */
198 
199 	__dec_zone_page_state(page, NR_FILE_PAGES);
200 	if (PageSwapBacked(page))
201 		__dec_zone_page_state(page, NR_SHMEM);
202 	BUG_ON(page_mapped(page));
203 
204 	/*
205 	 * Some filesystems seem to re-dirty the page even after
206 	 * the VM has canceled the dirty bit (eg ext3 journaling).
207 	 *
208 	 * Fix it up by doing a final dirty accounting check after
209 	 * having removed the page entirely.
210 	 */
211 	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
212 		dec_zone_page_state(page, NR_FILE_DIRTY);
213 		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
214 	}
215 }
216 
217 /**
218  * delete_from_page_cache - delete page from page cache
219  * @page: the page which the kernel is trying to remove from page cache
220  *
221  * This must be called only on pages that have been verified to be in the page
222  * cache and locked.  It will never put the page into the free list; the caller
223  * has a reference on the page.
224  */
225 void delete_from_page_cache(struct page *page)
226 {
227 	struct address_space *mapping = page->mapping;
228 	void (*freepage)(struct page *);
229 
230 	BUG_ON(!PageLocked(page));
231 
232 	freepage = mapping->a_ops->freepage;
233 	spin_lock_irq(&mapping->tree_lock);
234 	__delete_from_page_cache(page, NULL);
235 	spin_unlock_irq(&mapping->tree_lock);
236 	mem_cgroup_uncharge_cache_page(page);
237 
238 	if (freepage)
239 		freepage(page);
240 	page_cache_release(page);
241 }
242 EXPORT_SYMBOL(delete_from_page_cache);
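
/*
 * Illustrative sketch (not part of the original source): a caller that has
 * looked up and locked a page it wants to drop from the cache still holds
 * its own reference, so the sequence is roughly:
 *
 *	page = find_lock_page(mapping, index);
 *	if (page) {
 *		delete_from_page_cache(page);
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 *
 * delete_from_page_cache() drops the page cache's reference; the final
 * page_cache_release() drops the reference taken by find_lock_page().
 */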
243 
244 static int sleep_on_page(void *word)
245 {
246 	io_schedule();
247 	return 0;
248 }
249 
250 static int sleep_on_page_killable(void *word)
251 {
252 	sleep_on_page(word);
253 	return fatal_signal_pending(current) ? -EINTR : 0;
254 }
255 
256 static int filemap_check_errors(struct address_space *mapping)
257 {
258 	int ret = 0;
259 	/* Check for outstanding write errors */
260 	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
261 		ret = -ENOSPC;
262 	if (test_and_clear_bit(AS_EIO, &mapping->flags))
263 		ret = -EIO;
264 	return ret;
265 }
266 
267 /**
268  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
269  * @mapping:	address space structure to write
270  * @start:	offset in bytes where the range starts
271  * @end:	offset in bytes where the range ends (inclusive)
272  * @sync_mode:	enable synchronous operation
273  *
274  * Start writeback against all of a mapping's dirty pages that lie
275  * within the byte offsets <start, end> inclusive.
276  *
277  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
278  * opposed to a regular memory cleansing writeback.  The difference between
279  * these two operations is that if a dirty page/buffer is encountered, it must
280  * be waited upon, and not just skipped over.
281  */
282 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
283 				loff_t end, int sync_mode)
284 {
285 	int ret;
286 	struct writeback_control wbc = {
287 		.sync_mode = sync_mode,
288 		.nr_to_write = LONG_MAX,
289 		.range_start = start,
290 		.range_end = end,
291 	};
292 
293 	if (!mapping_cap_writeback_dirty(mapping))
294 		return 0;
295 
296 	ret = do_writepages(mapping, &wbc);
297 	return ret;
298 }
299 
300 static inline int __filemap_fdatawrite(struct address_space *mapping,
301 	int sync_mode)
302 {
303 	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
304 }
305 
306 int filemap_fdatawrite(struct address_space *mapping)
307 {
308 	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
309 }
310 EXPORT_SYMBOL(filemap_fdatawrite);
311 
312 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
313 				loff_t end)
314 {
315 	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
316 }
317 EXPORT_SYMBOL(filemap_fdatawrite_range);
318 
319 /**
320  * filemap_flush - mostly a non-blocking flush
321  * @mapping:	target address_space
322  *
323  * This is a mostly non-blocking flush.  Not suitable for data-integrity
324  * purposes - I/O may not be started against all dirty pages.
325  */
326 int filemap_flush(struct address_space *mapping)
327 {
328 	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
329 }
330 EXPORT_SYMBOL(filemap_flush);
331 
332 /**
333  * filemap_fdatawait_range - wait for writeback to complete
334  * @mapping:		address space structure to wait for
335  * @start_byte:		offset in bytes where the range starts
336  * @end_byte:		offset in bytes where the range ends (inclusive)
337  *
338  * Walk the list of under-writeback pages of the given address space
339  * in the given range and wait for all of them.
340  */
341 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
342 			    loff_t end_byte)
343 {
344 	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
345 	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
346 	struct pagevec pvec;
347 	int nr_pages;
348 	int ret2, ret = 0;
349 
350 	if (end_byte < start_byte)
351 		goto out;
352 
353 	pagevec_init(&pvec, 0);
354 	while ((index <= end) &&
355 			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
356 			PAGECACHE_TAG_WRITEBACK,
357 			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
358 		unsigned i;
359 
360 		for (i = 0; i < nr_pages; i++) {
361 			struct page *page = pvec.pages[i];
362 
363 			/* until radix tree lookup accepts end_index */
364 			if (page->index > end)
365 				continue;
366 
367 			wait_on_page_writeback(page);
368 			if (TestClearPageError(page))
369 				ret = -EIO;
370 		}
371 		pagevec_release(&pvec);
372 		cond_resched();
373 	}
374 out:
375 	ret2 = filemap_check_errors(mapping);
376 	if (!ret)
377 		ret = ret2;
378 
379 	return ret;
380 }
381 EXPORT_SYMBOL(filemap_fdatawait_range);
382 
383 /**
384  * filemap_fdatawait - wait for all under-writeback pages to complete
385  * @mapping: address space structure to wait for
386  *
387  * Walk the list of under-writeback pages of the given address space
388  * and wait for all of them.
389  */
390 int filemap_fdatawait(struct address_space *mapping)
391 {
392 	loff_t i_size = i_size_read(mapping->host);
393 
394 	if (i_size == 0)
395 		return 0;
396 
397 	return filemap_fdatawait_range(mapping, 0, i_size - 1);
398 }
399 EXPORT_SYMBOL(filemap_fdatawait);
400 
401 int filemap_write_and_wait(struct address_space *mapping)
402 {
403 	int err = 0;
404 
405 	if (mapping->nrpages) {
406 		err = filemap_fdatawrite(mapping);
407 		/*
408 		 * Even if the above returned an error, the pages may have been
409 		 * partially written (e.g. -ENOSPC), so we wait for them.
410 		 * But -EIO is a special case; it may indicate that the worst
411 		 * thing (e.g. a bug) happened, so we avoid waiting for it.
412 		 */
413 		if (err != -EIO) {
414 			int err2 = filemap_fdatawait(mapping);
415 			if (!err)
416 				err = err2;
417 		}
418 	} else {
419 		err = filemap_check_errors(mapping);
420 	}
421 	return err;
422 }
423 EXPORT_SYMBOL(filemap_write_and_wait);
424 
425 /**
426  * filemap_write_and_wait_range - write out & wait on a file range
427  * @mapping:	the address_space for the pages
428  * @lstart:	offset in bytes where the range starts
429  * @lend:	offset in bytes where the range ends (inclusive)
430  *
431  * Write out and wait upon file offsets lstart->lend, inclusive.
432  *
433  * Note that `lend' is inclusive (describes the last byte to be written) so
434  * that this function can be used to write to the very end-of-file (lend = -1).
435  */
436 int filemap_write_and_wait_range(struct address_space *mapping,
437 				 loff_t lstart, loff_t lend)
438 {
439 	int err = 0;
440 
441 	if (mapping->nrpages) {
442 		err = __filemap_fdatawrite_range(mapping, lstart, lend,
443 						 WB_SYNC_ALL);
444 		/* See comment of filemap_write_and_wait() */
445 		if (err != -EIO) {
446 			int err2 = filemap_fdatawait_range(mapping,
447 						lstart, lend);
448 			if (!err)
449 				err = err2;
450 		}
451 	} else {
452 		err = filemap_check_errors(mapping);
453 	}
454 	return err;
455 }
456 EXPORT_SYMBOL(filemap_write_and_wait_range);
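
/*
 * Illustrative sketch: the O_DIRECT read path later in this file uses this
 * same helper to flush and wait on a byte range before issuing direct I/O.
 * A hypothetical caller wanting data-integrity semantics for "count" bytes
 * at "pos" would do:
 *
 *	err = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
 *	if (err)
 *		return err;
 */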
457 
458 /**
459  * replace_page_cache_page - replace a pagecache page with a new one
460  * @old:	page to be replaced
461  * @new:	page to replace with
462  * @gfp_mask:	allocation mode
463  *
464  * This function replaces a page in the pagecache with a new one.  On
465  * success it acquires the pagecache reference for the new page and
466  * drops it for the old page.  Both the old and new pages must be
467  * locked.  This function does not add the new page to the LRU, the
468  * caller must do that.
469  *
470  * The remove + add is atomic.  The only way this function can fail is
471  * a memory allocation failure.
472  */
473 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
474 {
475 	int error;
476 
477 	VM_BUG_ON_PAGE(!PageLocked(old), old);
478 	VM_BUG_ON_PAGE(!PageLocked(new), new);
479 	VM_BUG_ON_PAGE(new->mapping, new);
480 
481 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
482 	if (!error) {
483 		struct address_space *mapping = old->mapping;
484 		void (*freepage)(struct page *);
485 
486 		pgoff_t offset = old->index;
487 		freepage = mapping->a_ops->freepage;
488 
489 		page_cache_get(new);
490 		new->mapping = mapping;
491 		new->index = offset;
492 
493 		spin_lock_irq(&mapping->tree_lock);
494 		__delete_from_page_cache(old, NULL);
495 		error = radix_tree_insert(&mapping->page_tree, offset, new);
496 		BUG_ON(error);
497 		mapping->nrpages++;
498 		__inc_zone_page_state(new, NR_FILE_PAGES);
499 		if (PageSwapBacked(new))
500 			__inc_zone_page_state(new, NR_SHMEM);
501 		spin_unlock_irq(&mapping->tree_lock);
502 		/* mem_cgroup code must not be called under tree_lock */
503 		mem_cgroup_replace_page_cache(old, new);
504 		radix_tree_preload_end();
505 		if (freepage)
506 			freepage(old);
507 		page_cache_release(old);
508 	}
509 
510 	return error;
511 }
512 EXPORT_SYMBOL_GPL(replace_page_cache_page);
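
/*
 * Illustrative sketch (hypothetical caller, not taken from an in-tree user):
 * with both pages locked, the replacement is followed by the LRU insertion
 * that this function deliberately leaves to the caller:
 *
 *	error = replace_page_cache_page(old, new, GFP_KERNEL);
 *	if (!error)
 *		lru_cache_add(new);
 */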
513 
514 static int page_cache_tree_insert(struct address_space *mapping,
515 				  struct page *page, void **shadowp)
516 {
517 	struct radix_tree_node *node;
518 	void **slot;
519 	int error;
520 
521 	error = __radix_tree_create(&mapping->page_tree, page->index,
522 				    &node, &slot);
523 	if (error)
524 		return error;
525 	if (*slot) {
526 		void *p;
527 
528 		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
529 		if (!radix_tree_exceptional_entry(p))
530 			return -EEXIST;
531 		if (shadowp)
532 			*shadowp = p;
533 		mapping->nrshadows--;
534 		if (node)
535 			workingset_node_shadows_dec(node);
536 	}
537 	radix_tree_replace_slot(slot, page);
538 	mapping->nrpages++;
539 	if (node) {
540 		workingset_node_pages_inc(node);
541 		/*
542 		 * Don't track node that contains actual pages.
543 		 *
544 		 * Avoid acquiring the list_lru lock if already
545 		 * untracked.  The list_empty() test is safe as
546 		 * node->private_list is protected by
547 		 * mapping->tree_lock.
548 		 */
549 		if (!list_empty(&node->private_list))
550 			list_lru_del(&workingset_shadow_nodes,
551 				     &node->private_list);
552 	}
553 	return 0;
554 }
555 
556 static int __add_to_page_cache_locked(struct page *page,
557 				      struct address_space *mapping,
558 				      pgoff_t offset, gfp_t gfp_mask,
559 				      void **shadowp)
560 {
561 	int error;
562 
563 	VM_BUG_ON_PAGE(!PageLocked(page), page);
564 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
565 
566 	error = mem_cgroup_charge_file(page, current->mm,
567 					gfp_mask & GFP_RECLAIM_MASK);
568 	if (error)
569 		return error;
570 
571 	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
572 	if (error) {
573 		mem_cgroup_uncharge_cache_page(page);
574 		return error;
575 	}
576 
577 	page_cache_get(page);
578 	page->mapping = mapping;
579 	page->index = offset;
580 
581 	spin_lock_irq(&mapping->tree_lock);
582 	error = page_cache_tree_insert(mapping, page, shadowp);
583 	radix_tree_preload_end();
584 	if (unlikely(error))
585 		goto err_insert;
586 	__inc_zone_page_state(page, NR_FILE_PAGES);
587 	spin_unlock_irq(&mapping->tree_lock);
588 	trace_mm_filemap_add_to_page_cache(page);
589 	return 0;
590 err_insert:
591 	page->mapping = NULL;
592 	/* Leave page->index set: truncation relies upon it */
593 	spin_unlock_irq(&mapping->tree_lock);
594 	mem_cgroup_uncharge_cache_page(page);
595 	page_cache_release(page);
596 	return error;
597 }
598 
599 /**
600  * add_to_page_cache_locked - add a locked page to the pagecache
601  * @page:	page to add
602  * @mapping:	the page's address_space
603  * @offset:	page index
604  * @gfp_mask:	page allocation mode
605  *
606  * This function is used to add a page to the pagecache; the page must be locked.
607  * This function does not add the page to the LRU.  The caller must do that.
608  */
609 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
610 		pgoff_t offset, gfp_t gfp_mask)
611 {
612 	return __add_to_page_cache_locked(page, mapping, offset,
613 					  gfp_mask, NULL);
614 }
615 EXPORT_SYMBOL(add_to_page_cache_locked);
616 
617 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
618 				pgoff_t offset, gfp_t gfp_mask)
619 {
620 	void *shadow = NULL;
621 	int ret;
622 
623 	__set_page_locked(page);
624 	ret = __add_to_page_cache_locked(page, mapping, offset,
625 					 gfp_mask, &shadow);
626 	if (unlikely(ret))
627 		__clear_page_locked(page);
628 	else {
629 		/*
630 		 * The page might have been evicted from cache only
631 		 * recently, in which case it should be activated like
632 		 * any other repeatedly accessed page.
633 		 */
634 		if (shadow && workingset_refault(shadow)) {
635 			SetPageActive(page);
636 			workingset_activation(page);
637 		} else
638 			ClearPageActive(page);
639 		lru_cache_add(page);
640 	}
641 	return ret;
642 }
643 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
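
/*
 * Illustrative sketch: the common way to populate the cache with new data
 * (mirrored by page_cache_read() further down in this file) is to allocate
 * a page, insert it, and then start the read:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
 *	if (ret == 0)
 *		ret = mapping->a_ops->readpage(file, page);
 *	page_cache_release(page);
 */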
644 
645 #ifdef CONFIG_NUMA
646 struct page *__page_cache_alloc(gfp_t gfp)
647 {
648 	int n;
649 	struct page *page;
650 
651 	if (cpuset_do_page_mem_spread()) {
652 		unsigned int cpuset_mems_cookie;
653 		do {
654 			cpuset_mems_cookie = read_mems_allowed_begin();
655 			n = cpuset_mem_spread_node();
656 			page = alloc_pages_exact_node(n, gfp, 0);
657 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
658 
659 		return page;
660 	}
661 	return alloc_pages(gfp, 0);
662 }
663 EXPORT_SYMBOL(__page_cache_alloc);
664 #endif
665 
666 /*
667  * In order to wait for pages to become available there must be
668  * waitqueues associated with pages. We use a hash table of
669  * waitqueues where the bucket discipline is to maintain all
670  * waiters on the same queue and wake all of them when any of the
671  * pages become available; each woken context then checks that the
672  * page it actually cares about became available. This saves space
673  * at the cost of "thundering herd" phenomena during rare hash
674  * collisions.
675  */
676 static wait_queue_head_t *page_waitqueue(struct page *page)
677 {
678 	const struct zone *zone = page_zone(page);
679 
680 	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
681 }
682 
683 static inline void wake_up_page(struct page *page, int bit)
684 {
685 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
686 }
687 
688 void wait_on_page_bit(struct page *page, int bit_nr)
689 {
690 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
691 
692 	if (test_bit(bit_nr, &page->flags))
693 		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
694 							TASK_UNINTERRUPTIBLE);
695 }
696 EXPORT_SYMBOL(wait_on_page_bit);
697 
698 int wait_on_page_bit_killable(struct page *page, int bit_nr)
699 {
700 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
701 
702 	if (!test_bit(bit_nr, &page->flags))
703 		return 0;
704 
705 	return __wait_on_bit(page_waitqueue(page), &wait,
706 			     sleep_on_page_killable, TASK_KILLABLE);
707 }
708 
709 /**
710  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
711  * @page: Page defining the wait queue of interest
712  * @waiter: Waiter to add to the queue
713  *
714  * Add an arbitrary @waiter to the wait queue for the nominated @page.
715  */
716 void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
717 {
718 	wait_queue_head_t *q = page_waitqueue(page);
719 	unsigned long flags;
720 
721 	spin_lock_irqsave(&q->lock, flags);
722 	__add_wait_queue(q, waiter);
723 	spin_unlock_irqrestore(&q->lock, flags);
724 }
725 EXPORT_SYMBOL_GPL(add_page_wait_queue);
726 
727 /**
728  * unlock_page - unlock a locked page
729  * @page: the page
730  *
731  * Unlocks the page and wakes up sleepers in wait_on_page_locked().
732  * Also wakes sleepers in wait_on_page_writeback() because the wakeup
733  * mechanism between PageLocked pages and PageWriteback pages is shared.
734  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
735  *
736  * The mb is necessary to enforce ordering between the clear_bit and the read
737  * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
738  */
739 void unlock_page(struct page *page)
740 {
741 	VM_BUG_ON_PAGE(!PageLocked(page), page);
742 	clear_bit_unlock(PG_locked, &page->flags);
743 	smp_mb__after_clear_bit();
744 	wake_up_page(page, PG_locked);
745 }
746 EXPORT_SYMBOL(unlock_page);
747 
748 /**
749  * end_page_writeback - end writeback against a page
750  * @page: the page
751  */
752 void end_page_writeback(struct page *page)
753 {
754 	if (TestClearPageReclaim(page))
755 		rotate_reclaimable_page(page);
756 
757 	if (!test_clear_page_writeback(page))
758 		BUG();
759 
760 	smp_mb__after_clear_bit();
761 	wake_up_page(page, PG_writeback);
762 }
763 EXPORT_SYMBOL(end_page_writeback);
764 
765 /**
766  * __lock_page - get a lock on the page, assuming we need to sleep to get it
767  * @page: the page to lock
768  */
769 void __lock_page(struct page *page)
770 {
771 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
772 
773 	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
774 							TASK_UNINTERRUPTIBLE);
775 }
776 EXPORT_SYMBOL(__lock_page);
777 
778 int __lock_page_killable(struct page *page)
779 {
780 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
781 
782 	return __wait_on_bit_lock(page_waitqueue(page), &wait,
783 					sleep_on_page_killable, TASK_KILLABLE);
784 }
785 EXPORT_SYMBOL_GPL(__lock_page_killable);
786 
787 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
788 			 unsigned int flags)
789 {
790 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
791 		/*
792 		 * CAUTION! In this case, mmap_sem is not released
793 		 * even though we return 0.
794 		 */
795 		if (flags & FAULT_FLAG_RETRY_NOWAIT)
796 			return 0;
797 
798 		up_read(&mm->mmap_sem);
799 		if (flags & FAULT_FLAG_KILLABLE)
800 			wait_on_page_locked_killable(page);
801 		else
802 			wait_on_page_locked(page);
803 		return 0;
804 	} else {
805 		if (flags & FAULT_FLAG_KILLABLE) {
806 			int ret;
807 
808 			ret = __lock_page_killable(page);
809 			if (ret) {
810 				up_read(&mm->mmap_sem);
811 				return 0;
812 			}
813 		} else
814 			__lock_page(page);
815 		return 1;
816 	}
817 }
818 
819 /**
820  * page_cache_next_hole - find the next hole (not-present entry)
821  * @mapping: mapping
822  * @index: index
823  * @max_scan: maximum range to search
824  *
825  * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
826  * lowest indexed hole.
827  *
828  * Returns: the index of the hole if found, otherwise returns an index
829  * outside of the set specified (in which case 'return - index >=
830  * max_scan' will be true). In rare cases of index wrap-around, 0 will
831  * be returned.
832  *
833  * page_cache_next_hole may be called under rcu_read_lock. However,
834  * like radix_tree_gang_lookup, this will not atomically search a
835  * snapshot of the tree at a single point in time. For example, if a
836  * hole is created at index 5, then subsequently a hole is created at
837  * index 10, page_cache_next_hole covering both indexes may return 10
838  * if called under rcu_read_lock.
839  */
840 pgoff_t page_cache_next_hole(struct address_space *mapping,
841 			     pgoff_t index, unsigned long max_scan)
842 {
843 	unsigned long i;
844 
845 	for (i = 0; i < max_scan; i++) {
846 		struct page *page;
847 
848 		page = radix_tree_lookup(&mapping->page_tree, index);
849 		if (!page || radix_tree_exceptional_entry(page))
850 			break;
851 		index++;
852 		if (index == 0)
853 			break;
854 	}
855 
856 	return index;
857 }
858 EXPORT_SYMBOL(page_cache_next_hole);
859 
860 /**
861  * page_cache_prev_hole - find the prev hole (not-present entry)
862  * @mapping: mapping
863  * @index: index
864  * @max_scan: maximum range to search
865  *
866  * Search backwards in the range [max(index-max_scan+1, 0), index] for
867  * the first hole.
868  *
869  * Returns: the index of the hole if found, otherwise returns an index
870  * outside of the set specified (in which case 'index - return >=
871  * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
872  * will be returned.
873  *
874  * page_cache_prev_hole may be called under rcu_read_lock. However,
875  * like radix_tree_gang_lookup, this will not atomically search a
876  * snapshot of the tree at a single point in time. For example, if a
877  * hole is created at index 10, then subsequently a hole is created at
878  * index 5, page_cache_prev_hole covering both indexes may return 5 if
879  * called under rcu_read_lock.
880  */
881 pgoff_t page_cache_prev_hole(struct address_space *mapping,
882 			     pgoff_t index, unsigned long max_scan)
883 {
884 	unsigned long i;
885 
886 	for (i = 0; i < max_scan; i++) {
887 		struct page *page;
888 
889 		page = radix_tree_lookup(&mapping->page_tree, index);
890 		if (!page || radix_tree_exceptional_entry(page))
891 			break;
892 		index--;
893 		if (index == ULONG_MAX)
894 			break;
895 	}
896 
897 	return index;
898 }
899 EXPORT_SYMBOL(page_cache_prev_hole);
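
/*
 * Illustrative sketch (hypothetical caller): the number of pages already
 * cached immediately before "index" can be derived from the previous hole:
 *
 *	head = page_cache_prev_hole(mapping, index - 1, max);
 *	nr_cached = index - 1 - head;
 */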
900 
901 /**
902  * find_get_entry - find and get a page cache entry
903  * @mapping: the address_space to search
904  * @offset: the page cache index
905  *
906  * Looks up the page cache slot at @mapping & @offset.  If there is a
907  * page cache page, it is returned with an increased refcount.
908  *
909  * If the slot holds a shadow entry of a previously evicted page, or a
910  * swap entry from shmem/tmpfs, it is returned.
911  *
912  * Otherwise, %NULL is returned.
913  */
914 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
915 {
916 	void **pagep;
917 	struct page *page;
918 
919 	rcu_read_lock();
920 repeat:
921 	page = NULL;
922 	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
923 	if (pagep) {
924 		page = radix_tree_deref_slot(pagep);
925 		if (unlikely(!page))
926 			goto out;
927 		if (radix_tree_exception(page)) {
928 			if (radix_tree_deref_retry(page))
929 				goto repeat;
930 			/*
931 			 * A shadow entry of a recently evicted page,
932 			 * or a swap entry from shmem/tmpfs.  Return
933 			 * it without attempting to raise page count.
934 			 */
935 			goto out;
936 		}
937 		if (!page_cache_get_speculative(page))
938 			goto repeat;
939 
940 		/*
941 		 * Has the page moved?
942 		 * This is part of the lockless pagecache protocol. See
943 		 * include/linux/pagemap.h for details.
944 		 */
945 		if (unlikely(page != *pagep)) {
946 			page_cache_release(page);
947 			goto repeat;
948 		}
949 	}
950 out:
951 	rcu_read_unlock();
952 
953 	return page;
954 }
955 EXPORT_SYMBOL(find_get_entry);
956 
957 /**
958  * find_get_page - find and get a page reference
959  * @mapping: the address_space to search
960  * @offset: the page index
961  *
962  * Looks up the page cache slot at @mapping & @offset.  If there is a
963  * page cache page, it is returned with an increased refcount.
964  *
965  * Otherwise, %NULL is returned.
966  */
967 struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
968 {
969 	struct page *page = find_get_entry(mapping, offset);
970 
971 	if (radix_tree_exceptional_entry(page))
972 		page = NULL;
973 	return page;
974 }
975 EXPORT_SYMBOL(find_get_page);
976 
977 /**
978  * find_lock_entry - locate, pin and lock a page cache entry
979  * @mapping: the address_space to search
980  * @offset: the page cache index
981  *
982  * Looks up the page cache slot at @mapping & @offset.  If there is a
983  * page cache page, it is returned locked and with an increased
984  * refcount.
985  *
986  * If the slot holds a shadow entry of a previously evicted page, or a
987  * swap entry from shmem/tmpfs, it is returned.
988  *
989  * Otherwise, %NULL is returned.
990  *
991  * find_lock_entry() may sleep.
992  */
993 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
994 {
995 	struct page *page;
996 
997 repeat:
998 	page = find_get_entry(mapping, offset);
999 	if (page && !radix_tree_exception(page)) {
1000 		lock_page(page);
1001 		/* Has the page been truncated? */
1002 		if (unlikely(page->mapping != mapping)) {
1003 			unlock_page(page);
1004 			page_cache_release(page);
1005 			goto repeat;
1006 		}
1007 		VM_BUG_ON_PAGE(page->index != offset, page);
1008 	}
1009 	return page;
1010 }
1011 EXPORT_SYMBOL(find_lock_entry);
1012 
1013 /**
1014  * find_lock_page - locate, pin and lock a pagecache page
1015  * @mapping: the address_space to search
1016  * @offset: the page index
1017  *
1018  * Looks up the page cache slot at @mapping & @offset.  If there is a
1019  * page cache page, it is returned locked and with an increased
1020  * refcount.
1021  *
1022  * Otherwise, %NULL is returned.
1023  *
1024  * find_lock_page() may sleep.
1025  */
1026 struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
1027 {
1028 	struct page *page = find_lock_entry(mapping, offset);
1029 
1030 	if (radix_tree_exceptional_entry(page))
1031 		page = NULL;
1032 	return page;
1033 }
1034 EXPORT_SYMBOL(find_lock_page);
1035 
1036 /**
1037  * find_or_create_page - locate or add a pagecache page
1038  * @mapping: the page's address_space
1039  * @index: the page's index into the mapping
1040  * @gfp_mask: page allocation mode
1041  *
1042  * Looks up the page cache slot at @mapping & @index.  If there is a
1043  * page cache page, it is returned locked and with an increased
1044  * refcount.
1045  *
1046  * If the page is not present, a new page is allocated using @gfp_mask
1047  * and added to the page cache and the VM's LRU list.  The page is
1048  * returned locked and with an increased refcount.
1049  *
1050  * On memory exhaustion, %NULL is returned.
1051  *
1052  * find_or_create_page() may sleep, even if @gfp_mask specifies an
1053  * atomic allocation!
1054  */
1055 struct page *find_or_create_page(struct address_space *mapping,
1056 		pgoff_t index, gfp_t gfp_mask)
1057 {
1058 	struct page *page;
1059 	int err;
1060 repeat:
1061 	page = find_lock_page(mapping, index);
1062 	if (!page) {
1063 		page = __page_cache_alloc(gfp_mask);
1064 		if (!page)
1065 			return NULL;
1066 		/*
1067 		 * We want a regular kernel memory (not highmem or DMA etc)
1068 		 * allocation for the radix tree nodes, but we need to honour
1069 		 * the context-specific requirements the caller has asked for.
1070 		 * GFP_RECLAIM_MASK collects those requirements.
1071 		 */
1072 		err = add_to_page_cache_lru(page, mapping, index,
1073 			(gfp_mask & GFP_RECLAIM_MASK));
1074 		if (unlikely(err)) {
1075 			page_cache_release(page);
1076 			page = NULL;
1077 			if (err == -EEXIST)
1078 				goto repeat;
1079 		}
1080 	}
1081 	return page;
1082 }
1083 EXPORT_SYMBOL(find_or_create_page);
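
/*
 * Illustrative sketch (hypothetical caller): the typical pattern is to grab
 * the page with the mapping's own allocation constraints (roughly what the
 * grab_cache_page() wrapper does), use it, then drop the lock and reference:
 *
 *	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	unlock_page(page);
 *	page_cache_release(page);
 */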
1084 
1085 /**
1086  * find_get_entries - gang pagecache lookup
1087  * @mapping:	The address_space to search
1088  * @start:	The starting page cache index
1089  * @nr_entries:	The maximum number of entries
1090  * @entries:	Where the resulting entries are placed
1091  * @indices:	The cache indices corresponding to the entries in @entries
1092  *
1093  * find_get_entries() will search for and return a group of up to
1094  * @nr_entries entries in the mapping.  The entries are placed at
1095  * @entries.  find_get_entries() takes a reference against any actual
1096  * pages it returns.
1097  *
1098  * The search returns a group of mapping-contiguous page cache entries
1099  * with ascending indexes.  There may be holes in the indices due to
1100  * not-present pages.
1101  *
1102  * Any shadow entries of evicted pages, or swap entries from
1103  * shmem/tmpfs, are included in the returned array.
1104  *
1105  * find_get_entries() returns the number of pages and shadow entries
1106  * which were found.
1107  */
1108 unsigned find_get_entries(struct address_space *mapping,
1109 			  pgoff_t start, unsigned int nr_entries,
1110 			  struct page **entries, pgoff_t *indices)
1111 {
1112 	void **slot;
1113 	unsigned int ret = 0;
1114 	struct radix_tree_iter iter;
1115 
1116 	if (!nr_entries)
1117 		return 0;
1118 
1119 	rcu_read_lock();
1120 restart:
1121 	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1122 		struct page *page;
1123 repeat:
1124 		page = radix_tree_deref_slot(slot);
1125 		if (unlikely(!page))
1126 			continue;
1127 		if (radix_tree_exception(page)) {
1128 			if (radix_tree_deref_retry(page))
1129 				goto restart;
1130 			/*
1131 			 * A shadow entry of a recently evicted page,
1132 			 * or a swap entry from shmem/tmpfs.  Return
1133 			 * it without attempting to raise page count.
1134 			 */
1135 			goto export;
1136 		}
1137 		if (!page_cache_get_speculative(page))
1138 			goto repeat;
1139 
1140 		/* Has the page moved? */
1141 		if (unlikely(page != *slot)) {
1142 			page_cache_release(page);
1143 			goto repeat;
1144 		}
1145 export:
1146 		indices[ret] = iter.index;
1147 		entries[ret] = page;
1148 		if (++ret == nr_entries)
1149 			break;
1150 	}
1151 	rcu_read_unlock();
1152 	return ret;
1153 }
1154 
1155 /**
1156  * find_get_pages - gang pagecache lookup
1157  * @mapping:	The address_space to search
1158  * @start:	The starting page index
1159  * @nr_pages:	The maximum number of pages
1160  * @pages:	Where the resulting pages are placed
1161  *
1162  * find_get_pages() will search for and return a group of up to
1163  * @nr_pages pages in the mapping.  The pages are placed at @pages.
1164  * find_get_pages() takes a reference against the returned pages.
1165  *
1166  * The search returns a group of mapping-contiguous pages with ascending
1167  * indexes.  There may be holes in the indices due to not-present pages.
1168  *
1169  * find_get_pages() returns the number of pages which were found.
1170  */
1171 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
1172 			    unsigned int nr_pages, struct page **pages)
1173 {
1174 	struct radix_tree_iter iter;
1175 	void **slot;
1176 	unsigned ret = 0;
1177 
1178 	if (unlikely(!nr_pages))
1179 		return 0;
1180 
1181 	rcu_read_lock();
1182 restart:
1183 	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1184 		struct page *page;
1185 repeat:
1186 		page = radix_tree_deref_slot(slot);
1187 		if (unlikely(!page))
1188 			continue;
1189 
1190 		if (radix_tree_exception(page)) {
1191 			if (radix_tree_deref_retry(page)) {
1192 				/*
1193 				 * Transient condition which can only trigger
1194 				 * when entry at index 0 moves out of or back
1195 				 * to root: none yet gotten, safe to restart.
1196 				 */
1197 				WARN_ON(iter.index);
1198 				goto restart;
1199 			}
1200 			/*
1201 			 * A shadow entry of a recently evicted page,
1202 			 * or a swap entry from shmem/tmpfs.  Skip
1203 			 * over it.
1204 			 */
1205 			continue;
1206 		}
1207 
1208 		if (!page_cache_get_speculative(page))
1209 			goto repeat;
1210 
1211 		/* Has the page moved? */
1212 		if (unlikely(page != *slot)) {
1213 			page_cache_release(page);
1214 			goto repeat;
1215 		}
1216 
1217 		pages[ret] = page;
1218 		if (++ret == nr_pages)
1219 			break;
1220 	}
1221 
1222 	rcu_read_unlock();
1223 	return ret;
1224 }
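
/*
 * Illustrative sketch (hypothetical caller): batched lookups, as wrapped by
 * pagevec_lookup(), are usually driven in a loop that releases the reference
 * taken on each returned page:
 *
 *	while ((nr = find_get_pages(mapping, index, PAGEVEC_SIZE, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			... process pages[i] ...
 *			page_cache_release(pages[i]);
 *		}
 *		index = pages[nr - 1]->index + 1;
 *	}
 */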
1225 
1226 /**
1227  * find_get_pages_contig - gang contiguous pagecache lookup
1228  * @mapping:	The address_space to search
1229  * @index:	The starting page index
1230  * @nr_pages:	The maximum number of pages
1231  * @pages:	Where the resulting pages are placed
1232  *
1233  * find_get_pages_contig() works exactly like find_get_pages(), except
1234  * that the returned pages are guaranteed to be contiguous.
1235  *
1236  * find_get_pages_contig() returns the number of pages which were found.
1237  */
1238 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
1239 			       unsigned int nr_pages, struct page **pages)
1240 {
1241 	struct radix_tree_iter iter;
1242 	void **slot;
1243 	unsigned int ret = 0;
1244 
1245 	if (unlikely(!nr_pages))
1246 		return 0;
1247 
1248 	rcu_read_lock();
1249 restart:
1250 	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
1251 		struct page *page;
1252 repeat:
1253 		page = radix_tree_deref_slot(slot);
1254 		/* A hole - there is no reason to continue */
1255 		if (unlikely(!page))
1256 			break;
1257 
1258 		if (radix_tree_exception(page)) {
1259 			if (radix_tree_deref_retry(page)) {
1260 				/*
1261 				 * Transient condition which can only trigger
1262 				 * when entry at index 0 moves out of or back
1263 				 * to root: none yet gotten, safe to restart.
1264 				 */
1265 				goto restart;
1266 			}
1267 			/*
1268 			 * A shadow entry of a recently evicted page,
1269 			 * or a swap entry from shmem/tmpfs.  Stop
1270 			 * looking for contiguous pages.
1271 			 */
1272 			break;
1273 		}
1274 
1275 		if (!page_cache_get_speculative(page))
1276 			goto repeat;
1277 
1278 		/* Has the page moved? */
1279 		if (unlikely(page != *slot)) {
1280 			page_cache_release(page);
1281 			goto repeat;
1282 		}
1283 
1284 		/*
1285 		 * We must check mapping and index after taking the ref.
1286 		 * Otherwise we can get both false positives and false
1287 		 * negatives, which is just confusing to the caller.
1288 		 */
1289 		if (page->mapping == NULL || page->index != iter.index) {
1290 			page_cache_release(page);
1291 			break;
1292 		}
1293 
1294 		pages[ret] = page;
1295 		if (++ret == nr_pages)
1296 			break;
1297 	}
1298 	rcu_read_unlock();
1299 	return ret;
1300 }
1301 EXPORT_SYMBOL(find_get_pages_contig);
1302 
1303 /**
1304  * find_get_pages_tag - find and return pages that match @tag
1305  * @mapping:	the address_space to search
1306  * @index:	the starting page index
1307  * @tag:	the tag index
1308  * @nr_pages:	the maximum number of pages
1309  * @pages:	where the resulting pages are placed
1310  *
1311  * Like find_get_pages, except we only return pages which are tagged with
1312  * @tag.   We update @index to index the next page for the traversal.
1313  */
1314 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
1315 			int tag, unsigned int nr_pages, struct page **pages)
1316 {
1317 	struct radix_tree_iter iter;
1318 	void **slot;
1319 	unsigned ret = 0;
1320 
1321 	if (unlikely(!nr_pages))
1322 		return 0;
1323 
1324 	rcu_read_lock();
1325 restart:
1326 	radix_tree_for_each_tagged(slot, &mapping->page_tree,
1327 				   &iter, *index, tag) {
1328 		struct page *page;
1329 repeat:
1330 		page = radix_tree_deref_slot(slot);
1331 		if (unlikely(!page))
1332 			continue;
1333 
1334 		if (radix_tree_exception(page)) {
1335 			if (radix_tree_deref_retry(page)) {
1336 				/*
1337 				 * Transient condition which can only trigger
1338 				 * when entry at index 0 moves out of or back
1339 				 * to root: none yet gotten, safe to restart.
1340 				 */
1341 				goto restart;
1342 			}
1343 			/*
1344 			 * A shadow entry of a recently evicted page.
1345 			 *
1346 			 * Those entries should never be tagged, but
1347 			 * this tree walk is lockless and the tags are
1348 			 * looked up in bulk, one radix tree node at a
1349 			 * time, so there is a sizable window for page
1350 			 * reclaim to evict a page we saw tagged.
1351 			 *
1352 			 * Skip over it.
1353 			 */
1354 			continue;
1355 		}
1356 
1357 		if (!page_cache_get_speculative(page))
1358 			goto repeat;
1359 
1360 		/* Has the page moved? */
1361 		if (unlikely(page != *slot)) {
1362 			page_cache_release(page);
1363 			goto repeat;
1364 		}
1365 
1366 		pages[ret] = page;
1367 		if (++ret == nr_pages)
1368 			break;
1369 	}
1370 
1371 	rcu_read_unlock();
1372 
1373 	if (ret)
1374 		*index = pages[ret - 1]->index + 1;
1375 
1376 	return ret;
1377 }
1378 EXPORT_SYMBOL(find_get_pages_tag);
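
/*
 * Illustrative sketch: filemap_fdatawait_range() above drives this kind of
 * lookup via pagevec_lookup_tag(); a hypothetical raw user looks much the
 * same, walking PAGECACHE_TAG_WRITEBACK pages until the index stops moving:
 *
 *	while ((nr = find_get_pages_tag(mapping, &index,
 *					PAGECACHE_TAG_WRITEBACK,
 *					PAGEVEC_SIZE, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			wait_on_page_writeback(pages[i]);
 *			page_cache_release(pages[i]);
 *		}
 *	}
 */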
1379 
1380 /**
1381  * grab_cache_page_nowait - returns locked page at given index in given cache
1382  * @mapping: target address_space
1383  * @index: the page index
1384  *
1385  * Same as grab_cache_page(), but do not wait if the page is unavailable.
1386  * This is intended for speculative data generators, where the data can
1387  * be regenerated if the page couldn't be grabbed.  This routine should
1388  * be safe to call while holding the lock for another page.
1389  *
1390  * Clear __GFP_FS when allocating the page to avoid recursion into the fs
1391  * and deadlock against the caller's locked page.
1392  */
1393 struct page *
1394 grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
1395 {
1396 	struct page *page = find_get_page(mapping, index);
1397 
1398 	if (page) {
1399 		if (trylock_page(page))
1400 			return page;
1401 		page_cache_release(page);
1402 		return NULL;
1403 	}
1404 	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
1405 	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
1406 		page_cache_release(page);
1407 		page = NULL;
1408 	}
1409 	return page;
1410 }
1411 EXPORT_SYMBOL(grab_cache_page_nowait);
1412 
1413 /*
1414  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1415  * a _large_ part of the i/o request. Imagine the worst scenario:
1416  *
1417  *      ---R__________________________________________B__________
1418  *         ^ reading here                             ^ bad block (assume 4k)
1419  *
1420  * read(R) => miss => readahead(R...B) => media error => frustrating retries
1421  * => failing the whole request => read(R) => read(R+1) =>
1422  * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1423  * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1424  * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1425  *
1426  * It is going insane. Fix it by quickly scaling down the readahead size.
1427  */
1428 static void shrink_readahead_size_eio(struct file *filp,
1429 					struct file_ra_state *ra)
1430 {
1431 	ra->ra_pages /= 4;
1432 }
1433 
1434 /**
1435  * do_generic_file_read - generic file read routine
1436  * @filp:	the file to read
1437  * @ppos:	current file position
1438  * @iter:	data destination
1439  * @written:	already copied
1440  *
1441  * This is a generic file read routine, and uses the
1442  * mapping->a_ops->readpage() function for the actual low-level stuff.
1443  *
1444  * This is really ugly. But the goto's actually try to clarify some
1445  * of the logic when it comes to error handling etc.
1446  */
1447 static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1448 		struct iov_iter *iter, ssize_t written)
1449 {
1450 	struct address_space *mapping = filp->f_mapping;
1451 	struct inode *inode = mapping->host;
1452 	struct file_ra_state *ra = &filp->f_ra;
1453 	pgoff_t index;
1454 	pgoff_t last_index;
1455 	pgoff_t prev_index;
1456 	unsigned long offset;      /* offset into pagecache page */
1457 	unsigned int prev_offset;
1458 	int error = 0;
1459 
1460 	index = *ppos >> PAGE_CACHE_SHIFT;
1461 	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
1462 	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
1463 	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
1464 	offset = *ppos & ~PAGE_CACHE_MASK;
1465 
1466 	for (;;) {
1467 		struct page *page;
1468 		pgoff_t end_index;
1469 		loff_t isize;
1470 		unsigned long nr, ret;
1471 
1472 		cond_resched();
1473 find_page:
1474 		page = find_get_page(mapping, index);
1475 		if (!page) {
1476 			page_cache_sync_readahead(mapping,
1477 					ra, filp,
1478 					index, last_index - index);
1479 			page = find_get_page(mapping, index);
1480 			if (unlikely(page == NULL))
1481 				goto no_cached_page;
1482 		}
1483 		if (PageReadahead(page)) {
1484 			page_cache_async_readahead(mapping,
1485 					ra, filp, page,
1486 					index, last_index - index);
1487 		}
1488 		if (!PageUptodate(page)) {
1489 			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
1490 					!mapping->a_ops->is_partially_uptodate)
1491 				goto page_not_up_to_date;
1492 			if (!trylock_page(page))
1493 				goto page_not_up_to_date;
1494 			/* Did it get truncated before we got the lock? */
1495 			if (!page->mapping)
1496 				goto page_not_up_to_date_locked;
1497 			if (!mapping->a_ops->is_partially_uptodate(page,
1498 							offset, iter->count))
1499 				goto page_not_up_to_date_locked;
1500 			unlock_page(page);
1501 		}
1502 page_ok:
1503 		/*
1504 		 * i_size must be checked after we know the page is Uptodate.
1505 		 *
1506 		 * Checking i_size only then allows us to calculate
1507 		 * the correct value for "nr", which means the zero-filled
1508 		 * part of the page is not copied back to userspace (unless
1509 		 * another truncate extends the file - this is desired though).
1510 		 */
1511 
1512 		isize = i_size_read(inode);
1513 		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1514 		if (unlikely(!isize || index > end_index)) {
1515 			page_cache_release(page);
1516 			goto out;
1517 		}
1518 
1519 		/* nr is the maximum number of bytes to copy from this page */
1520 		nr = PAGE_CACHE_SIZE;
1521 		if (index == end_index) {
1522 			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1523 			if (nr <= offset) {
1524 				page_cache_release(page);
1525 				goto out;
1526 			}
1527 		}
1528 		nr = nr - offset;
1529 
1530 		/* If users can be writing to this page using arbitrary
1531 		 * virtual addresses, take care about potential aliasing
1532 		 * before reading the page on the kernel side.
1533 		 */
1534 		if (mapping_writably_mapped(mapping))
1535 			flush_dcache_page(page);
1536 
1537 		/*
1538 		 * When a sequential read accesses a page several times,
1539 		 * only mark it as accessed the first time.
1540 		 */
1541 		if (prev_index != index || offset != prev_offset)
1542 			mark_page_accessed(page);
1543 		prev_index = index;
1544 
1545 		/*
1546 		 * Ok, we have the page, and it's up-to-date, so
1547 		 * now we can copy it to user space...
1548 		 */
1549 
1550 		ret = copy_page_to_iter(page, offset, nr, iter);
1551 		offset += ret;
1552 		index += offset >> PAGE_CACHE_SHIFT;
1553 		offset &= ~PAGE_CACHE_MASK;
1554 		prev_offset = offset;
1555 
1556 		page_cache_release(page);
1557 		written += ret;
1558 		if (!iov_iter_count(iter))
1559 			goto out;
1560 		if (ret < nr) {
1561 			error = -EFAULT;
1562 			goto out;
1563 		}
1564 		continue;
1565 
1566 page_not_up_to_date:
1567 		/* Get exclusive access to the page ... */
1568 		error = lock_page_killable(page);
1569 		if (unlikely(error))
1570 			goto readpage_error;
1571 
1572 page_not_up_to_date_locked:
1573 		/* Did it get truncated before we got the lock? */
1574 		if (!page->mapping) {
1575 			unlock_page(page);
1576 			page_cache_release(page);
1577 			continue;
1578 		}
1579 
1580 		/* Did somebody else fill it already? */
1581 		if (PageUptodate(page)) {
1582 			unlock_page(page);
1583 			goto page_ok;
1584 		}
1585 
1586 readpage:
1587 		/*
1588 		 * A previous I/O error may have been due to temporary
1589 		 * failures, eg. multipath errors.
1590 		 * PG_error will be set again if readpage fails.
1591 		 */
1592 		ClearPageError(page);
1593 		/* Start the actual read. The read will unlock the page. */
1594 		error = mapping->a_ops->readpage(filp, page);
1595 
1596 		if (unlikely(error)) {
1597 			if (error == AOP_TRUNCATED_PAGE) {
1598 				page_cache_release(page);
1599 				error = 0;
1600 				goto find_page;
1601 			}
1602 			goto readpage_error;
1603 		}
1604 
1605 		if (!PageUptodate(page)) {
1606 			error = lock_page_killable(page);
1607 			if (unlikely(error))
1608 				goto readpage_error;
1609 			if (!PageUptodate(page)) {
1610 				if (page->mapping == NULL) {
1611 					/*
1612 					 * invalidate_mapping_pages got it
1613 					 */
1614 					unlock_page(page);
1615 					page_cache_release(page);
1616 					goto find_page;
1617 				}
1618 				unlock_page(page);
1619 				shrink_readahead_size_eio(filp, ra);
1620 				error = -EIO;
1621 				goto readpage_error;
1622 			}
1623 			unlock_page(page);
1624 		}
1625 
1626 		goto page_ok;
1627 
1628 readpage_error:
1629 		/* UHHUH! A synchronous read error occurred. Report it */
1630 		page_cache_release(page);
1631 		goto out;
1632 
1633 no_cached_page:
1634 		/*
1635 		 * Ok, it wasn't cached, so we need to create a new
1636 		 * page..
1637 		 */
1638 		page = page_cache_alloc_cold(mapping);
1639 		if (!page) {
1640 			error = -ENOMEM;
1641 			goto out;
1642 		}
1643 		error = add_to_page_cache_lru(page, mapping,
1644 						index, GFP_KERNEL);
1645 		if (error) {
1646 			page_cache_release(page);
1647 			if (error == -EEXIST) {
1648 				error = 0;
1649 				goto find_page;
1650 			}
1651 			goto out;
1652 		}
1653 		goto readpage;
1654 	}
1655 
1656 out:
1657 	ra->prev_pos = prev_index;
1658 	ra->prev_pos <<= PAGE_CACHE_SHIFT;
1659 	ra->prev_pos |= prev_offset;
1660 
1661 	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1662 	file_accessed(filp);
1663 	return written ? written : error;
1664 }
1665 
1666 /*
1667  * Performs necessary checks before doing a read or write
1668  * @iov:	io vector request
1669  * @nr_segs:	number of segments in the iovec
1670  * @count:	number of bytes to write
1671  * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
1672  *
1673  * Adjusts the number of segments and the number of bytes to transfer (nr_segs
1674  * should be properly initialized first). Returns an appropriate error code that
1675  * the caller should return, or zero if the access should be allowed.
1676  */
1677 int generic_segment_checks(const struct iovec *iov,
1678 			unsigned long *nr_segs, size_t *count, int access_flags)
1679 {
1680 	unsigned long   seg;
1681 	size_t cnt = 0;
1682 	for (seg = 0; seg < *nr_segs; seg++) {
1683 		const struct iovec *iv = &iov[seg];
1684 
1685 		/*
1686 		 * If any segment has a negative length, or the cumulative
1687 		 * length ever wraps negative then return -EINVAL.
1688 		 */
1689 		cnt += iv->iov_len;
1690 		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1691 			return -EINVAL;
1692 		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
1693 			continue;
1694 		if (seg == 0)
1695 			return -EFAULT;
1696 		*nr_segs = seg;
1697 		cnt -= iv->iov_len;	/* This segment is no good */
1698 		break;
1699 	}
1700 	*count = cnt;
1701 	return 0;
1702 }
1703 EXPORT_SYMBOL(generic_segment_checks);
1704 
1705 /**
1706  * generic_file_aio_read - generic filesystem read routine
1707  * @iocb:	kernel I/O control block
1708  * @iov:	io vector request
1709  * @nr_segs:	number of segments in the iovec
1710  * @pos:	current file position
1711  *
1712  * This is the "read()" routine for all filesystems
1713  * that can use the page cache directly.
1714  */
1715 ssize_t
1716 generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1717 		unsigned long nr_segs, loff_t pos)
1718 {
1719 	struct file *filp = iocb->ki_filp;
1720 	ssize_t retval;
1721 	size_t count;
1722 	loff_t *ppos = &iocb->ki_pos;
1723 	struct iov_iter i;
1724 
1725 	count = 0;
1726 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1727 	if (retval)
1728 		return retval;
1729 	iov_iter_init(&i, iov, nr_segs, count, 0);
1730 
1731 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1732 	if (filp->f_flags & O_DIRECT) {
1733 		loff_t size;
1734 		struct address_space *mapping;
1735 		struct inode *inode;
1736 
1737 		mapping = filp->f_mapping;
1738 		inode = mapping->host;
1739 		if (!count)
1740 			goto out; /* skip atime */
1741 		size = i_size_read(inode);
1742 		retval = filemap_write_and_wait_range(mapping, pos,
1743 					pos + iov_length(iov, nr_segs) - 1);
1744 		if (!retval) {
1745 			retval = mapping->a_ops->direct_IO(READ, iocb,
1746 							   iov, pos, nr_segs);
1747 		}
1748 		if (retval > 0) {
1749 			*ppos = pos + retval;
1750 			count -= retval;
1751 			/*
1752 			 * If we did a short DIO read we need to skip the
1753 			 * section of the iov that we've already read data into.
1754 			 */
1755 			iov_iter_advance(&i, retval);
1756 		}
1757 
1758 		/*
1759 		 * Btrfs can have a short DIO read if we encounter
1760 		 * compressed extents, so if there was an error, or if
1761 		 * we've already read everything we wanted to, or if
1762 		 * there was a short read because we hit EOF, go ahead
1763 		 * and return.  Otherwise fallthrough to buffered io for
1764 		 * the rest of the read.
1765 		 */
1766 		if (retval < 0 || !count || *ppos >= size) {
1767 			file_accessed(filp);
1768 			goto out;
1769 		}
1770 	}
1771 
1772 	retval = do_generic_file_read(filp, ppos, &i, retval);
1773 out:
1774 	return retval;
1775 }
1776 EXPORT_SYMBOL(generic_file_aio_read);
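
/*
 * Illustrative sketch (hypothetical filesystem): filesystems that use the
 * page cache directly typically just wire this routine into their
 * file_operations, e.g.:
 *
 *	const struct file_operations example_file_operations = {
 *		.read		= do_sync_read,
 *		.aio_read	= generic_file_aio_read,
 *		...
 *	};
 */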
1777 
1778 #ifdef CONFIG_MMU
1779 /**
1780  * page_cache_read - adds requested page to the page cache if not already there
1781  * @file:	file to read
1782  * @offset:	page index
1783  *
1784  * This adds the requested page to the page cache if it isn't already there,
1785  * and schedules an I/O to read in its contents from disk.
1786  */
1787 static int page_cache_read(struct file *file, pgoff_t offset)
1788 {
1789 	struct address_space *mapping = file->f_mapping;
1790 	struct page *page;
1791 	int ret;
1792 
1793 	do {
1794 		page = page_cache_alloc_cold(mapping);
1795 		if (!page)
1796 			return -ENOMEM;
1797 
1798 		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1799 		if (ret == 0)
1800 			ret = mapping->a_ops->readpage(file, page);
1801 		else if (ret == -EEXIST)
1802 			ret = 0; /* losing race to add is OK */
1803 
1804 		page_cache_release(page);
1805 
1806 	} while (ret == AOP_TRUNCATED_PAGE);
1807 
1808 	return ret;
1809 }
1810 
1811 #define MMAP_LOTSAMISS  (100)
1812 
1813 /*
1814  * Synchronous readahead happens when we don't even find
1815  * a page in the page cache at all.
1816  */
1817 static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1818 				   struct file_ra_state *ra,
1819 				   struct file *file,
1820 				   pgoff_t offset)
1821 {
1822 	unsigned long ra_pages;
1823 	struct address_space *mapping = file->f_mapping;
1824 
1825 	/* If we don't want any read-ahead, don't bother */
1826 	if (vma->vm_flags & VM_RAND_READ)
1827 		return;
1828 	if (!ra->ra_pages)
1829 		return;
1830 
1831 	if (vma->vm_flags & VM_SEQ_READ) {
1832 		page_cache_sync_readahead(mapping, ra, file, offset,
1833 					  ra->ra_pages);
1834 		return;
1835 	}
1836 
1837 	/* Avoid banging the cache line if not needed */
1838 	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
1839 		ra->mmap_miss++;
1840 
1841 	/*
1842 	 * Do we miss much more than hit in this file? If so,
1843 	 * stop bothering with read-ahead. It will only hurt.
1844 	 */
1845 	if (ra->mmap_miss > MMAP_LOTSAMISS)
1846 		return;
1847 
1848 	/*
1849 	 * mmap read-around
1850 	 */
1851 	ra_pages = max_sane_readahead(ra->ra_pages);
1852 	ra->start = max_t(long, 0, offset - ra_pages / 2);
1853 	ra->size = ra_pages;
1854 	ra->async_size = ra_pages / 4;
1855 	ra_submit(ra, mapping, file);
1856 }
1857 
1858 /*
1859  * Asynchronous readahead happens when we find the page and PG_readahead,
1860  * so we want to possibly extend the readahead further..
1861  */
1862 static void do_async_mmap_readahead(struct vm_area_struct *vma,
1863 				    struct file_ra_state *ra,
1864 				    struct file *file,
1865 				    struct page *page,
1866 				    pgoff_t offset)
1867 {
1868 	struct address_space *mapping = file->f_mapping;
1869 
1870 	/* If we don't want any read-ahead, don't bother */
1871 	if (vma->vm_flags & VM_RAND_READ)
1872 		return;
1873 	if (ra->mmap_miss > 0)
1874 		ra->mmap_miss--;
1875 	if (PageReadahead(page))
1876 		page_cache_async_readahead(mapping, ra, file,
1877 					   page, offset, ra->ra_pages);
1878 }
1879 
1880 /**
1881  * filemap_fault - read in file data for page fault handling
1882  * @vma:	vma in which the fault was taken
1883  * @vmf:	struct vm_fault containing details of the fault
1884  *
1885  * filemap_fault() is invoked via the vma operations vector for a
1886  * mapped memory region to read in file data during a page fault.
1887  *
1888  * The gotos are kind of ugly, but this streamlines the normal case of having
1889  * it in the page cache, and handles the special cases reasonably without
1890  * having a lot of duplicated code.
1891  */
1892 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1893 {
1894 	int error;
1895 	struct file *file = vma->vm_file;
1896 	struct address_space *mapping = file->f_mapping;
1897 	struct file_ra_state *ra = &file->f_ra;
1898 	struct inode *inode = mapping->host;
1899 	pgoff_t offset = vmf->pgoff;
1900 	struct page *page;
1901 	loff_t size;
1902 	int ret = 0;
1903 
1904 	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
1905 	if (offset >= size >> PAGE_CACHE_SHIFT)
1906 		return VM_FAULT_SIGBUS;
1907 
1908 	/*
1909 	 * Do we have something in the page cache already?
1910 	 */
1911 	page = find_get_page(mapping, offset);
1912 	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
1913 		/*
1914 		 * We found the page, so try async readahead before
1915 		 * waiting for the lock.
1916 		 */
1917 		do_async_mmap_readahead(vma, ra, file, page, offset);
1918 	} else if (!page) {
1919 		/* No page in the page cache at all */
1920 		do_sync_mmap_readahead(vma, ra, file, offset);
1921 		count_vm_event(PGMAJFAULT);
1922 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1923 		ret = VM_FAULT_MAJOR;
1924 retry_find:
1925 		page = find_get_page(mapping, offset);
1926 		if (!page)
1927 			goto no_cached_page;
1928 	}
1929 
1930 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1931 		page_cache_release(page);
1932 		return ret | VM_FAULT_RETRY;
1933 	}
1934 
1935 	/* Did it get truncated? */
1936 	if (unlikely(page->mapping != mapping)) {
1937 		unlock_page(page);
1938 		put_page(page);
1939 		goto retry_find;
1940 	}
1941 	VM_BUG_ON_PAGE(page->index != offset, page);
1942 
1943 	/*
1944 	 * We have a locked page in the page cache, now we need to check
1945 	 * that it's up-to-date. If not, it is going to be due to an error.
1946 	 */
1947 	if (unlikely(!PageUptodate(page)))
1948 		goto page_not_uptodate;
1949 
1950 	/*
1951 	 * Found the page and have a reference on it.
1952 	 * We must recheck i_size under page lock.
1953 	 */
1954 	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
1955 	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
1956 		unlock_page(page);
1957 		page_cache_release(page);
1958 		return VM_FAULT_SIGBUS;
1959 	}
1960 
1961 	vmf->page = page;
1962 	return ret | VM_FAULT_LOCKED;
1963 
1964 no_cached_page:
1965 	/*
1966 	 * We're only likely to ever get here if MADV_RANDOM is in
1967 	 * effect.
1968 	 */
1969 	error = page_cache_read(file, offset);
1970 
1971 	/*
1972 	 * The page we want has now been added to the page cache.
1973 	 * In the unlikely event that someone removed it in the
1974 	 * meantime, we'll just come back here and read it again.
1975 	 */
1976 	if (error >= 0)
1977 		goto retry_find;
1978 
1979 	/*
1980 	 * An error return from page_cache_read can result if the
1981 	 * system is low on memory, or a problem occurs while trying
1982 	 * to schedule I/O.
1983 	 */
1984 	if (error == -ENOMEM)
1985 		return VM_FAULT_OOM;
1986 	return VM_FAULT_SIGBUS;
1987 
1988 page_not_uptodate:
1989 	/*
1990 	 * Take care of errors if the page isn't up-to-date.
1991 	 * Try to re-read it _once_. We do this synchronously,
1992 	 * because there really aren't any performance issues here
1993 	 * and we need to check for errors.
1994 	 */
1995 	ClearPageError(page);
1996 	error = mapping->a_ops->readpage(file, page);
1997 	if (!error) {
1998 		wait_on_page_locked(page);
1999 		if (!PageUptodate(page))
2000 			error = -EIO;
2001 	}
2002 	page_cache_release(page);
2003 
2004 	if (!error || error == AOP_TRUNCATED_PAGE)
2005 		goto retry_find;
2006 
2007 	/* Things didn't work out. Return SIGBUS to tell the mm layer so. */
2008 	shrink_readahead_size_eio(file, ra);
2009 	return VM_FAULT_SIGBUS;
2010 }
2011 EXPORT_SYMBOL(filemap_fault);
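
/*
 * Example (illustrative only): filesystems that need their own page_mkwrite
 * handling but are otherwise happy with the generic fault path commonly reuse
 * filemap_fault in their vm_operations, roughly:
 *
 *	static const struct vm_operations_struct example_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= example_page_mkwrite,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 *
 * example_page_mkwrite is a made-up, filesystem-specific handler; the fully
 * generic version of this table is generic_file_vm_ops below.
 */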
2012 
2013 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
2014 {
2015 	struct radix_tree_iter iter;
2016 	void **slot;
2017 	struct file *file = vma->vm_file;
2018 	struct address_space *mapping = file->f_mapping;
2019 	loff_t size;
2020 	struct page *page;
2021 	unsigned long address = (unsigned long) vmf->virtual_address;
2022 	unsigned long addr;
2023 	pte_t *pte;
2024 
2025 	rcu_read_lock();
2026 	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
2027 		if (iter.index > vmf->max_pgoff)
2028 			break;
2029 repeat:
2030 		page = radix_tree_deref_slot(slot);
2031 		if (unlikely(!page))
2032 			goto next;
2033 		if (radix_tree_exception(page)) {
2034 			if (radix_tree_deref_retry(page))
2035 				break;
2036 			else
2037 				goto next;
2038 		}
2039 
2040 		if (!page_cache_get_speculative(page))
2041 			goto repeat;
2042 
2043 		/* Has the page moved? */
2044 		if (unlikely(page != *slot)) {
2045 			page_cache_release(page);
2046 			goto repeat;
2047 		}
2048 
2049 		if (!PageUptodate(page) ||
2050 				PageReadahead(page) ||
2051 				PageHWPoison(page))
2052 			goto skip;
2053 		if (!trylock_page(page))
2054 			goto skip;
2055 
2056 		if (page->mapping != mapping || !PageUptodate(page))
2057 			goto unlock;
2058 
2059 		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
2060 		if (page->index >= size >> PAGE_CACHE_SHIFT)
2061 			goto unlock;
2062 
2063 		pte = vmf->pte + page->index - vmf->pgoff;
2064 		if (!pte_none(*pte))
2065 			goto unlock;
2066 
2067 		if (file->f_ra.mmap_miss > 0)
2068 			file->f_ra.mmap_miss--;
2069 		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
2070 		do_set_pte(vma, addr, page, pte, false, false);
2071 		unlock_page(page);
2072 		goto next;
2073 unlock:
2074 		unlock_page(page);
2075 skip:
2076 		page_cache_release(page);
2077 next:
2078 		if (iter.index == vmf->max_pgoff)
2079 			break;
2080 	}
2081 	rcu_read_unlock();
2082 }
2083 EXPORT_SYMBOL(filemap_map_pages);
2084 
2085 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2086 {
2087 	struct page *page = vmf->page;
2088 	struct inode *inode = file_inode(vma->vm_file);
2089 	int ret = VM_FAULT_LOCKED;
2090 
2091 	sb_start_pagefault(inode->i_sb);
2092 	file_update_time(vma->vm_file);
2093 	lock_page(page);
2094 	if (page->mapping != inode->i_mapping) {
2095 		unlock_page(page);
2096 		ret = VM_FAULT_NOPAGE;
2097 		goto out;
2098 	}
2099 	/*
2100 	 * We mark the page dirty already here so that when freeze is in
2101 	 * progress, we are guaranteed that writeback during freezing will
2102 	 * see the dirty page and writeprotect it again.
2103 	 */
2104 	set_page_dirty(page);
2105 	wait_for_stable_page(page);
2106 out:
2107 	sb_end_pagefault(inode->i_sb);
2108 	return ret;
2109 }
2110 EXPORT_SYMBOL(filemap_page_mkwrite);
2111 
2112 const struct vm_operations_struct generic_file_vm_ops = {
2113 	.fault		= filemap_fault,
2114 	.map_pages	= filemap_map_pages,
2115 	.page_mkwrite	= filemap_page_mkwrite,
2116 	.remap_pages	= generic_file_remap_pages,
2117 };
2118 
2119 /* This is used for a general mmap of a disk file */
2120 
2121 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2122 {
2123 	struct address_space *mapping = file->f_mapping;
2124 
2125 	if (!mapping->a_ops->readpage)
2126 		return -ENOEXEC;
2127 	file_accessed(file);
2128 	vma->vm_ops = &generic_file_vm_ops;
2129 	return 0;
2130 }
2131 
2132 /*
2133  * This is for filesystems which do not implement ->writepage.
2134  */
2135 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
2136 {
2137 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2138 		return -EINVAL;
2139 	return generic_file_mmap(file, vma);
2140 }
2141 #else
2142 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2143 {
2144 	return -ENOSYS;
2145 }
2146 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
2147 {
2148 	return -ENOSYS;
2149 }
2150 #endif /* CONFIG_MMU */
2151 
2152 EXPORT_SYMBOL(generic_file_mmap);
2153 EXPORT_SYMBOL(generic_file_readonly_mmap);
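
/*
 * Example (illustrative only): these helpers are normally hooked up through
 * the ->mmap member of a filesystem's file_operations, for instance:
 *
 *	static const struct file_operations example_file_operations = {
 *		.mmap		= generic_file_mmap,
 *	};
 *
 * A filesystem without ->writepage would use generic_file_readonly_mmap
 * instead, which refuses shared writable mappings.
 */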
2154 
2155 static struct page *wait_on_page_read(struct page *page)
2156 {
2157 	if (!IS_ERR(page)) {
2158 		wait_on_page_locked(page);
2159 		if (!PageUptodate(page)) {
2160 			page_cache_release(page);
2161 			page = ERR_PTR(-EIO);
2162 		}
2163 	}
2164 	return page;
2165 }
2166 
2167 static struct page *__read_cache_page(struct address_space *mapping,
2168 				pgoff_t index,
2169 				int (*filler)(void *, struct page *),
2170 				void *data,
2171 				gfp_t gfp)
2172 {
2173 	struct page *page;
2174 	int err;
2175 repeat:
2176 	page = find_get_page(mapping, index);
2177 	if (!page) {
2178 		page = __page_cache_alloc(gfp | __GFP_COLD);
2179 		if (!page)
2180 			return ERR_PTR(-ENOMEM);
2181 		err = add_to_page_cache_lru(page, mapping, index, gfp);
2182 		if (unlikely(err)) {
2183 			page_cache_release(page);
2184 			if (err == -EEXIST)
2185 				goto repeat;
2186 			/* Presumably ENOMEM for radix tree node */
2187 			return ERR_PTR(err);
2188 		}
2189 		err = filler(data, page);
2190 		if (err < 0) {
2191 			page_cache_release(page);
2192 			page = ERR_PTR(err);
2193 		} else {
2194 			page = wait_on_page_read(page);
2195 		}
2196 	}
2197 	return page;
2198 }
2199 
2200 static struct page *do_read_cache_page(struct address_space *mapping,
2201 				pgoff_t index,
2202 				int (*filler)(void *, struct page *),
2203 				void *data,
2204 				gfp_t gfp)
2205 
2206 {
2207 	struct page *page;
2208 	int err;
2209 
2210 retry:
2211 	page = __read_cache_page(mapping, index, filler, data, gfp);
2212 	if (IS_ERR(page))
2213 		return page;
2214 	if (PageUptodate(page))
2215 		goto out;
2216 
2217 	lock_page(page);
2218 	if (!page->mapping) {
2219 		unlock_page(page);
2220 		page_cache_release(page);
2221 		goto retry;
2222 	}
2223 	if (PageUptodate(page)) {
2224 		unlock_page(page);
2225 		goto out;
2226 	}
2227 	err = filler(data, page);
2228 	if (err < 0) {
2229 		page_cache_release(page);
2230 		return ERR_PTR(err);
2231 	} else {
2232 		page = wait_on_page_read(page);
2233 		if (IS_ERR(page))
2234 			return page;
2235 	}
2236 out:
2237 	mark_page_accessed(page);
2238 	return page;
2239 }
2240 
2241 /**
2242  * read_cache_page - read into page cache, fill it if needed
2243  * @mapping:	the page's address_space
2244  * @index:	the page index
2245  * @filler:	function to perform the read
2246  * @data:	first arg to filler(data, page) function, often left as NULL
2247  *
2248  * Read into the page cache. If a page already exists, and PageUptodate() is
2249  * not set, try to fill the page and wait for it to become unlocked.
2250  *
2251  * If the page does not get brought uptodate, return -EIO.
2252  */
2253 struct page *read_cache_page(struct address_space *mapping,
2254 				pgoff_t index,
2255 				int (*filler)(void *, struct page *),
2256 				void *data)
2257 {
2258 	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
2259 }
2260 EXPORT_SYMBOL(read_cache_page);
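
/*
 * Example (illustrative sketch, names made up): most callers go through the
 * read_mapping_page() wrapper in <linux/pagemap.h>, which passes the
 * mapping's ->readpage as the filler:
 *
 *	struct page *page = read_mapping_page(inode->i_mapping, n, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * On success the page is returned uptodate and unlocked; on failure an
 * ERR_PTR() such as -EIO or -ENOMEM is returned instead.
 */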
2261 
2262 /**
2263  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2264  * @mapping:	the page's address_space
2265  * @index:	the page index
2266  * @gfp:	the page allocator flags to use if allocating
2267  *
2268  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
2269  * any new page allocations done using the specified allocation flags.
2270  *
2271  * If the page does not get brought uptodate, return -EIO.
2272  */
2273 struct page *read_cache_page_gfp(struct address_space *mapping,
2274 				pgoff_t index,
2275 				gfp_t gfp)
2276 {
2277 	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
2278 
2279 	return do_read_cache_page(mapping, index, filler, NULL, gfp);
2280 }
2281 EXPORT_SYMBOL(read_cache_page_gfp);
2282 
2283 /*
2284  * Performs necessary checks before doing a write
2285  *
2286  * Can adjust the writing position or the number of bytes to write.
2287  * Returns the appropriate error code that the caller should return, or
2288  * zero if the write should be allowed.
2289  */
2290 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
2291 {
2292 	struct inode *inode = file->f_mapping->host;
2293 	unsigned long limit = rlimit(RLIMIT_FSIZE);
2294 
2295 	if (unlikely(*pos < 0))
2296 		return -EINVAL;
2297 
2298 	if (!isblk) {
2299 		/* FIXME: this is for backwards compatibility with 2.4 */
2300 		if (file->f_flags & O_APPEND)
2301 			*pos = i_size_read(inode);
2302 
2303 		if (limit != RLIM_INFINITY) {
2304 			if (*pos >= limit) {
2305 				send_sig(SIGXFSZ, current, 0);
2306 				return -EFBIG;
2307 			}
2308 			if (*count > limit - (typeof(limit))*pos) {
2309 				*count = limit - (typeof(limit))*pos;
2310 			}
2311 		}
2312 	}
2313 
2314 	/*
2315 	 * LFS rule
2316 	 */
2317 	if (unlikely(*pos + *count > MAX_NON_LFS &&
2318 				!(file->f_flags & O_LARGEFILE))) {
2319 		if (*pos >= MAX_NON_LFS) {
2320 			return -EFBIG;
2321 		}
2322 		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
2323 			*count = MAX_NON_LFS - (unsigned long)*pos;
2324 		}
2325 	}
2326 
2327 	/*
2328 	 * Are we about to exceed the fs block limit?
2329 	 *
2330 	 * If we have written data it becomes a short write.  If we have
2331 	 * exceeded without writing data we send a signal and return EFBIG.
2332 	 * Linus's frestrict idea will clean these up nicely.
2333 	 */
2334 	if (likely(!isblk)) {
2335 		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
2336 			if (*count || *pos > inode->i_sb->s_maxbytes) {
2337 				return -EFBIG;
2338 			}
2339 			/* zero-length writes at ->s_maxbytes are OK */
2340 		}
2341 
2342 		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
2343 			*count = inode->i_sb->s_maxbytes - *pos;
2344 	} else {
2345 #ifdef CONFIG_BLOCK
2346 		loff_t isize;
2347 		if (bdev_read_only(I_BDEV(inode)))
2348 			return -EPERM;
2349 		isize = i_size_read(inode);
2350 		if (*pos >= isize) {
2351 			if (*count || *pos > isize)
2352 				return -ENOSPC;
2353 		}
2354 
2355 		if (*pos + *count > isize)
2356 			*count = isize - *pos;
2357 #else
2358 		return -EPERM;
2359 #endif
2360 	}
2361 	return 0;
2362 }
2363 EXPORT_SYMBOL(generic_write_checks);
2364 
2365 int pagecache_write_begin(struct file *file, struct address_space *mapping,
2366 				loff_t pos, unsigned len, unsigned flags,
2367 				struct page **pagep, void **fsdata)
2368 {
2369 	const struct address_space_operations *aops = mapping->a_ops;
2370 
2371 	return aops->write_begin(file, mapping, pos, len, flags,
2372 							pagep, fsdata);
2373 }
2374 EXPORT_SYMBOL(pagecache_write_begin);
2375 
2376 int pagecache_write_end(struct file *file, struct address_space *mapping,
2377 				loff_t pos, unsigned len, unsigned copied,
2378 				struct page *page, void *fsdata)
2379 {
2380 	const struct address_space_operations *aops = mapping->a_ops;
2381 
2382 	mark_page_accessed(page);
2383 	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2384 }
2385 EXPORT_SYMBOL(pagecache_write_end);
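
/*
 * Example (illustrative sketch, error handling trimmed): these two wrappers
 * are meant to be used as a pair around a copy into the page cache:
 *
 *	struct page *page;
 *	void *fsdata;
 *	int ret;
 *
 *	ret = pagecache_write_begin(file, mapping, pos, len, 0, &page, &fsdata);
 *	if (ret)
 *		return ret;
 *	...copy len bytes into the page at offset (pos & (PAGE_CACHE_SIZE - 1))...
 *	ret = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
 *
 * The "copied" argument to pagecache_write_end() may be less than "len" if
 * the copy came up short.
 */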
2386 
2387 ssize_t
2388 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2389 		unsigned long *nr_segs, loff_t pos,
2390 		size_t count, size_t ocount)
2391 {
2392 	struct file	*file = iocb->ki_filp;
2393 	struct address_space *mapping = file->f_mapping;
2394 	struct inode	*inode = mapping->host;
2395 	ssize_t		written;
2396 	size_t		write_len;
2397 	pgoff_t		end;
2398 
2399 	if (count != ocount)
2400 		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2401 
2402 	write_len = iov_length(iov, *nr_segs);
2403 	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2404 
2405 	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2406 	if (written)
2407 		goto out;
2408 
2409 	/*
2410 	 * After a write we want buffered reads to be sure to go to disk to get
2411 	 * the new data.  We invalidate clean cached pages from the region we're
2412 	 * about to write.  We do this *before* the write so that we can return
2413 	 * without clobbering -EIOCBQUEUED from ->direct_IO().
2414 	 */
2415 	if (mapping->nrpages) {
2416 		written = invalidate_inode_pages2_range(mapping,
2417 					pos >> PAGE_CACHE_SHIFT, end);
2418 		/*
2419 		 * If a page cannot be invalidated, return 0 to fall back
2420 		 * to buffered write.
2421 		 */
2422 		if (written) {
2423 			if (written == -EBUSY)
2424 				return 0;
2425 			goto out;
2426 		}
2427 	}
2428 
2429 	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2430 
2431 	/*
2432 	 * Finally, try again to invalidate clean pages which might have been
2433 	 * cached by non-direct readahead, or faulted in by get_user_pages()
2434 	 * if the source of the write was an mmap'ed region of the file
2435 	 * we're writing.  Either one is a pretty crazy thing to do,
2436 	 * so we don't support it 100%.  If this invalidation
2437 	 * fails, tough, the write still worked...
2438 	 */
2439 	if (mapping->nrpages) {
2440 		invalidate_inode_pages2_range(mapping,
2441 					      pos >> PAGE_CACHE_SHIFT, end);
2442 	}
2443 
2444 	if (written > 0) {
2445 		pos += written;
2446 		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2447 			i_size_write(inode, pos);
2448 			mark_inode_dirty(inode);
2449 		}
2450 		iocb->ki_pos = pos;
2451 	}
2452 out:
2453 	return written;
2454 }
2455 EXPORT_SYMBOL(generic_file_direct_write);
2456 
2457 /*
2458  * Find or create a page at the given pagecache position. Return the locked
2459  * page. This function is specifically for buffered writes.
2460  */
2461 struct page *grab_cache_page_write_begin(struct address_space *mapping,
2462 					pgoff_t index, unsigned flags)
2463 {
2464 	int status;
2465 	gfp_t gfp_mask;
2466 	struct page *page;
2467 	gfp_t gfp_notmask = 0;
2468 
2469 	gfp_mask = mapping_gfp_mask(mapping);
2470 	if (mapping_cap_account_dirty(mapping))
2471 		gfp_mask |= __GFP_WRITE;
2472 	if (flags & AOP_FLAG_NOFS)
2473 		gfp_notmask = __GFP_FS;
2474 repeat:
2475 	page = find_lock_page(mapping, index);
2476 	if (page)
2477 		goto found;
2478 
2479 	page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
2480 	if (!page)
2481 		return NULL;
2482 	status = add_to_page_cache_lru(page, mapping, index,
2483 						GFP_KERNEL & ~gfp_notmask);
2484 	if (unlikely(status)) {
2485 		page_cache_release(page);
2486 		if (status == -EEXIST)
2487 			goto repeat;
2488 		return NULL;
2489 	}
2490 found:
2491 	wait_for_stable_page(page);
2492 	return page;
2493 }
2494 EXPORT_SYMBOL(grab_cache_page_write_begin);
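
/*
 * Example (illustrative sketch): a filesystem's ->write_begin implementation
 * typically uses this helper to obtain the locked pagecache page it returns
 * through *pagep, roughly:
 *
 *	static int example_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 *		struct page *page;
 *
 *		page = grab_cache_page_write_begin(mapping, index, flags);
 *		if (!page)
 *			return -ENOMEM;
 *		*pagep = page;
 *		return 0;
 *	}
 *
 * example_write_begin is a made-up name; real implementations also prepare
 * the page contents, see block_write_begin and friends.
 */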
2495 
2496 ssize_t generic_perform_write(struct file *file,
2497 				struct iov_iter *i, loff_t pos)
2498 {
2499 	struct address_space *mapping = file->f_mapping;
2500 	const struct address_space_operations *a_ops = mapping->a_ops;
2501 	long status = 0;
2502 	ssize_t written = 0;
2503 	unsigned int flags = 0;
2504 
2505 	/*
2506 	 * Copies from kernel address space cannot fail (NFSD is a big user).
2507 	 */
2508 	if (segment_eq(get_fs(), KERNEL_DS))
2509 		flags |= AOP_FLAG_UNINTERRUPTIBLE;
2510 
2511 	do {
2512 		struct page *page;
2513 		unsigned long offset;	/* Offset into pagecache page */
2514 		unsigned long bytes;	/* Bytes to write to page */
2515 		size_t copied;		/* Bytes copied from user */
2516 		void *fsdata;
2517 
2518 		offset = (pos & (PAGE_CACHE_SIZE - 1));
2519 		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2520 						iov_iter_count(i));
2521 
2522 again:
2523 		/*
2524 		 * Bring in the user page that we will copy from _first_.
2525 		 * Otherwise there's a nasty deadlock on copying from the
2526 		 * same page as we're writing to, without it being marked
2527 		 * up-to-date.
2528 		 *
2529 		 * Not only is this an optimisation, but it is also required
2530 		 * to check that the address is actually valid, when atomic
2531 		 * usercopies are used, below.
2532 		 */
2533 		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2534 			status = -EFAULT;
2535 			break;
2536 		}
2537 
2538 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2539 						&page, &fsdata);
2540 		if (unlikely(status))
2541 			break;
2542 
2543 		if (mapping_writably_mapped(mapping))
2544 			flush_dcache_page(page);
2545 
2546 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2547 		flush_dcache_page(page);
2548 
2549 		mark_page_accessed(page);
2550 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
2551 						page, fsdata);
2552 		if (unlikely(status < 0))
2553 			break;
2554 		copied = status;
2555 
2556 		cond_resched();
2557 
2558 		iov_iter_advance(i, copied);
2559 		if (unlikely(copied == 0)) {
2560 			/*
2561 			 * If we were unable to copy any data at all, we must
2562 			 * fall back to a single segment length write.
2563 			 *
2564 			 * If we didn't fall back here, we could livelock
2565 			 * because not all segments in the iov can be copied at
2566 			 * once without a pagefault.
2567 			 */
2568 			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2569 						iov_iter_single_seg_count(i));
2570 			goto again;
2571 		}
2572 		pos += copied;
2573 		written += copied;
2574 
2575 		balance_dirty_pages_ratelimited(mapping);
2576 		if (fatal_signal_pending(current)) {
2577 			status = -EINTR;
2578 			break;
2579 		}
2580 	} while (iov_iter_count(i));
2581 
2582 	return written ? written : status;
2583 }
2584 EXPORT_SYMBOL(generic_perform_write);
2585 
2586 /**
2587  * __generic_file_aio_write - write data to a file
2588  * @iocb:	IO state structure (file, offset, etc.)
2589  * @iov:	vector with data to write
2590  * @nr_segs:	number of segments in the vector
2591  *
2592  * This function does all the work needed for actually writing data to a
2593  * file. It does all basic checks, removes SUID from the file, updates
2594  * modification times and calls proper subroutines depending on whether we
2595  * do direct IO or a standard buffered write.
2596  *
2597  * It expects i_mutex to be grabbed unless we work on a block device or similar
2598  * object which does not need locking at all.
2599  *
2600  * This function does *not* take care of syncing data in case of O_SYNC write.
2601  * A caller has to handle it. This is mainly due to the fact that we want to
2602  * avoid syncing under i_mutex.
2603  */
2604 ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2605 				 unsigned long nr_segs)
2606 {
2607 	struct file *file = iocb->ki_filp;
2608 	struct address_space * mapping = file->f_mapping;
2609 	size_t ocount;		/* original count */
2610 	size_t count;		/* after file limit checks */
2611 	struct inode 	*inode = mapping->host;
2612 	loff_t		pos = iocb->ki_pos;
2613 	ssize_t		written = 0;
2614 	ssize_t		err;
2615 	ssize_t		status;
2616 	struct iov_iter from;
2617 
2618 	ocount = 0;
2619 	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2620 	if (err)
2621 		return err;
2622 
2623 	count = ocount;
2624 
2625 	/* We can write back this queue in page reclaim */
2626 	current->backing_dev_info = mapping->backing_dev_info;
2627 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2628 	if (err)
2629 		goto out;
2630 
2631 	if (count == 0)
2632 		goto out;
2633 
2634 	err = file_remove_suid(file);
2635 	if (err)
2636 		goto out;
2637 
2638 	err = file_update_time(file);
2639 	if (err)
2640 		goto out;
2641 
2642 	iov_iter_init(&from, iov, nr_segs, count, 0);
2643 
2644 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2645 	if (unlikely(file->f_flags & O_DIRECT)) {
2646 		loff_t endbyte;
2647 
2648 		written = generic_file_direct_write(iocb, iov, &from.nr_segs, pos,
2649 							count, ocount);
2650 		if (written < 0 || written == count)
2651 			goto out;
2652 		iov_iter_advance(&from, written);
2653 
2654 		/*
2655 		 * direct-io write to a hole: fall through to buffered I/O
2656 		 * for completing the rest of the request.
2657 		 */
2658 		pos += written;
2659 		count -= written;
2660 
2661 		status = generic_perform_write(file, &from, pos);
2662 		/*
2663 		 * If generic_perform_write() returned a synchronous error
2664 		 * then we want to return the number of bytes which were
2665 		 * direct-written, or the error code if that was zero.  Note
2666 		 * that this differs from normal direct-io semantics, which
2667 		 * will return -EFOO even if some bytes were written.
2668 		 */
2669 		if (unlikely(status < 0) && !written) {
2670 			err = status;
2671 			goto out;
2672 		}
2673 		iocb->ki_pos = pos + status;
2674 		/*
2675 		 * We need to ensure that the page cache pages are written to
2676 		 * disk and invalidated to preserve the expected O_DIRECT
2677 		 * semantics.
2678 		 */
2679 		endbyte = pos + status - 1;
2680 		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2681 		if (err == 0) {
2682 			written += status;
2683 			invalidate_mapping_pages(mapping,
2684 						 pos >> PAGE_CACHE_SHIFT,
2685 						 endbyte >> PAGE_CACHE_SHIFT);
2686 		} else {
2687 			/*
2688 			 * We don't know how much we wrote, so just return
2689 			 * the number of bytes which were direct-written.
2690 			 */
2691 		}
2692 	} else {
2693 		written = generic_perform_write(file, &from, pos);
2694 		if (likely(written >= 0))
2695 			iocb->ki_pos = pos + written;
2696 	}
2697 out:
2698 	current->backing_dev_info = NULL;
2699 	return written ? written : err;
2700 }
2701 EXPORT_SYMBOL(__generic_file_aio_write);
2702 
2703 /**
2704  * generic_file_aio_write - write data to a file
2705  * @iocb:	IO state structure
2706  * @iov:	vector with data to write
2707  * @nr_segs:	number of segments in the vector
2708  * @pos:	position in file where to write
2709  *
2710  * This is a wrapper around __generic_file_aio_write() to be used by most
2711  * filesystems. It takes care of syncing the file in case of an O_SYNC write
2712  * and acquires i_mutex as needed.
2713  */
2714 ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2715 		unsigned long nr_segs, loff_t pos)
2716 {
2717 	struct file *file = iocb->ki_filp;
2718 	struct inode *inode = file->f_mapping->host;
2719 	ssize_t ret;
2720 
2721 	BUG_ON(iocb->ki_pos != pos);
2722 
2723 	mutex_lock(&inode->i_mutex);
2724 	ret = __generic_file_aio_write(iocb, iov, nr_segs);
2725 	mutex_unlock(&inode->i_mutex);
2726 
2727 	if (ret > 0) {
2728 		ssize_t err;
2729 
2730 		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
2731 		if (err < 0)
2732 			ret = err;
2733 	}
2734 	return ret;
2735 }
2736 EXPORT_SYMBOL(generic_file_aio_write);
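
/*
 * Example (illustrative only): as with the read side, filesystems normally
 * expose this through their file_operations, e.g.:
 *
 *	static const struct file_operations example_file_operations = {
 *		.write		= do_sync_write,
 *		.aio_write	= generic_file_aio_write,
 *	};
 *
 * "example_file_operations" is a made-up name for illustration.
 */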
2737 
2738 /**
2739  * try_to_release_page() - release old fs-specific metadata on a page
2740  *
2741  * @page: the page which the kernel is trying to free
2742  * @gfp_mask: memory allocation flags (and I/O mode)
2743  *
2744  * The address_space is asked to try to release any data held against the page
2745  * (presumably at page->private).  If the release was successful, return `1'.
2746  * Otherwise return zero.
2747  *
2748  * This may also be called if PG_fscache is set on a page, indicating that the
2749  * page is known to the local caching routines.
2750  *
2751  * The @gfp_mask argument specifies whether I/O may be performed to release
2752  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT and __GFP_FS).
2753  *
2754  */
2755 int try_to_release_page(struct page *page, gfp_t gfp_mask)
2756 {
2757 	struct address_space * const mapping = page->mapping;
2758 
2759 	BUG_ON(!PageLocked(page));
2760 	if (PageWriteback(page))
2761 		return 0;
2762 
2763 	if (mapping && mapping->a_ops->releasepage)
2764 		return mapping->a_ops->releasepage(page, gfp_mask);
2765 	return try_to_free_buffers(page);
2766 }
2767 
2768 EXPORT_SYMBOL(try_to_release_page);
2769