/* linux/net/ceph/pagevec.c (revision 092e0e7e520a1fca03e13c9f2d157432a8657ff2) */
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const char __user *data,
					  int num_pages,
					  loff_t off, size_t len)
{
	struct page **pages;
	int rc;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	down_read(&current->mm->mmap_sem);
	rc = get_user_pages(current, current->mm, (unsigned long)data,
			    num_pages, 0, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		goto fail;
	return pages;

fail:
	kfree(pages);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);

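/*
 * Usage sketch: pinning a user buffer for direct I/O and dropping the page
 * references again with ceph_put_page_vector().  The helper name below is an
 * illustrative assumption; calc_pages_for() is assumed to be the inline from
 * <linux/ceph/libceph.h>, included above.
 */
static int __maybe_unused example_pin_user_buffer(const char __user *buf,
						  loff_t off, size_t len)
{
	int num_pages = calc_pages_for(off, len);
	struct page **pages;

	pages = ceph_get_direct_page_vector(buf, num_pages, off, len);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... hand the pinned pages to the messenger/OSD client ... */

	ceph_put_page_vector(pages, num_pages);
	return 0;
}
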
/*
 * Drop a reference on each page in the vector (e.g. the references taken by
 * get_user_pages() above) and free the vector itself.
 */
void ceph_put_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);

/*
 * Free each page in the vector (pairs with ceph_alloc_page_vector() below)
 * and free the vector itself.
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);

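/*
 * Usage sketch: the typical allocate/release lifecycle of a kernel page
 * vector.  The helper name and the use of GFP_NOFS here are illustrative
 * assumptions.
 */
static int __maybe_unused example_alloc_and_release(size_t len)
{
	int num_pages = calc_pages_for(0, len);
	struct page **pages;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... fill the pages and hand them to the OSD client ... */

	ceph_release_page_vector(pages, num_pages);
	return 0;
}
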
/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
				  const char __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);

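/*
 * Usage sketch: staging a write by copying a user buffer into freshly
 * allocated pages.  Note that only the in-page part of 'off'
 * (off & ~PAGE_CACHE_MASK) matters above; the vector is taken to start at
 * the page containing 'off'.  The helper name is an illustrative assumption.
 */
static int __maybe_unused example_stage_user_write(const char __user *buf,
						   loff_t off, size_t len)
{
	int num_pages = calc_pages_for(off, len);
	struct page **pages;
	int ret;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = ceph_copy_user_to_page_vector(pages, buf, off, len);
	if (ret < 0)
		goto out;

	/* ... queue the pages for writeback ... */
	ret = 0;
out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}
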
/*
 * copy kernel data into a page vector
 */
int ceph_copy_to_page_vector(struct page **pages,
			     const char *data,
			     loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;
	size_t l;

	while (left > 0) {
		l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

/*
 * copy data from a page vector into a kernel buffer
 */
int ceph_copy_from_page_vector(struct page **pages,
			       char *data,
			       loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;
	size_t l;

	while (left > 0) {
		l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);

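/*
 * Usage sketch: round-tripping a kernel buffer through a page vector with
 * the two helpers above.  The helper name and the memcmp() check are
 * illustrative assumptions.
 */
static int __maybe_unused example_kernel_roundtrip(struct page **pages,
						   const char *src, char *dst,
						   loff_t off, size_t len)
{
	ceph_copy_to_page_vector(pages, src, off, len);
	ceph_copy_from_page_vector(pages, dst, off, len);

	return memcmp(src, dst, len) ? -EIO : 0;
}
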
/*
 * copy data from a page vector into a user buffer
 */
int ceph_copy_page_vector_to_user(struct page **pages,
				  char __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, left, PAGE_CACHE_SIZE-po);
		bad = copy_to_user(data, page_address(pages[i]) + po, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		if (po) {
			po += l - bad;
			if (po == PAGE_CACHE_SIZE)
				po = 0;
		}
		i++;
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_page_vector_to_user);

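/*
 * Usage sketch: completing a read by copying page-vector contents back to a
 * user buffer.  The helper name is an illustrative assumption.
 */
static int __maybe_unused example_finish_user_read(struct page **pages,
						   char __user *buf,
						   loff_t off, size_t len)
{
	int ret = ceph_copy_page_vector_to_user(pages, buf, off, len);

	return ret < 0 ? ret : 0;
}
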
/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
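
/*
 * Usage sketch: zeroing everything in a page vector beyond 'len' valid bytes,
 * e.g. so a short read does not expose stale page contents.  The helper name
 * and the num_pages parameter are illustrative assumptions.
 */
static void __maybe_unused example_zero_tail(struct page **pages,
					     int num_pages, int len)
{
	int total = num_pages << PAGE_CACHE_SHIFT;

	if (len < total)
		ceph_zero_page_vector_range(len, total - len, pages);
}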