xref: /linux/net/ceph/pagevec.c (revision 9e9f60108423f18a99c9cc93ef7f23490ecc709b)
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
					  int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	down_read(&current->mm->mmap_sem);
	while (got < num_pages) {
		rc = get_user_pages(current, current->mm,
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, 0, pages + got, NULL);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		goto fail;
	return pages;

fail:
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
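
/*
 * Illustrative sketch (the names "udata" and "ulen" are hypothetical
 * caller variables, not part of this file): a direct-I/O read path
 * might pin the destination user buffer, fill the pinned pages, then
 * dirty and release them:
 *
 *	num_pages = calc_pages_for((unsigned long)udata, ulen);
 *	pages = ceph_get_direct_page_vector(udata, num_pages, true);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...read into the pinned pages...
 *	ceph_put_page_vector(pages, num_pages, true);
 */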

void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	if (is_vmalloc_addr(pages))
		vfree(pages);
	else
		kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);

void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
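
/*
 * Illustrative sketch ("num_pages" and the GFP choice are up to the
 * caller): allocate a vector of pages for an in-flight request and
 * free it when the request completes:
 *
 *	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...use the pages...
 *	ceph_release_page_vector(pages, num_pages);
 */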

/*
 * copy user data into a page vector; returns len on success or
 * -EFAULT if the user buffer faults
 */
int ceph_copy_user_to_page_vector(struct page **pages,
					 const void __user *data,
					 loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		/* a partial copy is retried from where it stopped */
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
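
/*
 * Illustrative sketch ("udata" and "len" are hypothetical): stage a
 * user buffer into an allocated page vector before sending it:
 *
 *	ret = ceph_copy_user_to_page_vector(pages, udata, 0, len);
 *	if (ret < 0)
 *		goto out_release;	/+ -EFAULT +/
 */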

/*
 * copy kernel data into a page vector
 */
void ceph_copy_to_page_vector(struct page **pages,
				    const void *data,
				    loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

/*
 * copy data from a page vector into a kernel buffer
 */
void ceph_copy_from_page_vector(struct page **pages,
				    void *data,
				    loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
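
/*
 * Illustrative sketch ("buf" and "len" are hypothetical): move kernel
 * data into a page vector before a write, or back out of one after a
 * read:
 *
 *	ceph_copy_to_page_vector(pages, buf, 0, len);
 *	...
 *	ceph_copy_from_page_vector(pages, buf, 0, len);
 */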

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
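
/*
 * Illustrative sketch ("bytes_read" and "total" are hypothetical): zero
 * the tail of a short read so the caller never sees stale page
 * contents:
 *
 *	if (bytes_read < total)
 *		ceph_zero_page_vector_range(bytes_read,
 *					    total - bytes_read, pages);
 */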