/* /linux/mm/folio-compat.c (revision 67f49869106f78882a8a09b736d4884be85aba18) */
/*
 * Compatibility functions which bloat the callers too much to make inline.
 * All of the callers of these functions should be converted to use folios
 * eventually.
 */
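/*
 * Illustrative sketch only (not part of the original file): a caller of the
 * legacy page API such as
 *
 *	unlock_page(page);
 *	mark_page_accessed(page);
 *
 * would eventually be converted to work on the folio directly:
 *
 *	struct folio *folio = page_folio(page);
 *
 *	folio_unlock(folio);
 *	folio_mark_accessed(folio);
 *
 * which is exactly what the wrappers below do on the caller's behalf.
 */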

#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include "internal.h"

struct address_space *page_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}
EXPORT_SYMBOL(page_mapping);

void unlock_page(struct page *page)
{
	return folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);

void end_page_writeback(struct page *page)
{
	return folio_end_writeback(page_folio(page));
}
EXPORT_SYMBOL(end_page_writeback);

void wait_on_page_writeback(struct page *page)
{
	return folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);

void wait_for_stable_page(struct page *page)
{
	return folio_wait_stable(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);

void mark_page_accessed(struct page *page)
{
	folio_mark_accessed(page_folio(page));
}
EXPORT_SYMBOL(mark_page_accessed);

bool set_page_writeback(struct page *page)
{
	return folio_start_writeback(page_folio(page));
}
EXPORT_SYMBOL(set_page_writeback);

bool set_page_dirty(struct page *page)
{
	return folio_mark_dirty(page_folio(page));
}
EXPORT_SYMBOL(set_page_dirty);

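/*
 * Look up the owning address_space with page_mapping() and mark the
 * containing folio dirty via filemap_dirty_folio(), the folio equivalent
 * of this helper.
 */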
int __set_page_dirty_nobuffers(struct page *page)
{
	return filemap_dirty_folio(page_mapping(page), page_folio(page));
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

bool clear_page_dirty_for_io(struct page *page)
{
	return folio_clear_dirty_for_io(page_folio(page));
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

bool redirty_page_for_writepage(struct writeback_control *wbc,
		struct page *page)
{
	return folio_redirty_for_writepage(wbc, page_folio(page));
}
EXPORT_SYMBOL(redirty_page_for_writepage);

void lru_cache_add_inactive_or_unevictable(struct page *page,
		struct vm_area_struct *vma)
{
	folio_add_lru_vma(page_folio(page), vma);
}

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	return filemap_add_folio(mapping, page_folio(page), index, gfp);
}
EXPORT_SYMBOL(add_to_page_cache_lru);

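/*
 * __filemap_get_folio() may return NULL, or an XArray value entry such as
 * a shadow entry, rather than a real folio; hand those straight back to
 * the caller (&folio->page is still NULL in the NULL case because the page
 * is the folio's first member).  Otherwise return the exact page within
 * the folio that covers @index, since a large folio spans several page
 * cache indices.
 */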
noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	if (!folio || xa_is_value(folio))
		return &folio->page;
	return folio_file_page(folio, index);
}
EXPORT_SYMBOL(pagecache_get_page);

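/*
 * Find or create a page ready for write_begin: FGP_LOCK returns it locked,
 * FGP_WRITE notes that the caller intends to write to it, FGP_CREAT
 * allocates a page if none exists, and FGP_STABLE waits for any writeback
 * so the contents are stable before they are modified.
 */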
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index)
{
	unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;

	return pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

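/*
 * Callers must pass a head page: a tail page has no LRU state of its own,
 * so warn and fail instead of isolating it.  The cast is safe here because
 * a head page and its folio are the same object.
 */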
int isolate_lru_page(struct page *page)
{
	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
		return -EBUSY;
	return folio_isolate_lru((struct folio *)page);
}

void putback_lru_page(struct page *page)
{
	folio_putback_lru(page_folio(page));
}