/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p;
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;

	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		if (fatal_signal_pending(current))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
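
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller creates a request for a locked page.  Real callers live in
 * read.c and write.c; this only shows the calling convention and the
 * ERR_PTR-based error handling.
 */
#if 0
static int nfs_example_setup_request(struct nfs_open_context *ctx,
				     struct inode *inode, struct page *page,
				     unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	/* the page must already be locked by the caller */
	req = nfs_create_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* ... queue req for I/O here ... */
	nfs_release_request(req);
	return 0;
}
#endif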

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to lock and tag
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);

	if (!nfs_lock_request_dontget(req))
		return 0;
	if (req->wb_page != NULL)
		radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	if (req->wb_page != NULL) {
		spin_lock(&inode->i_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
		nfs_unlock_request(req);
		spin_unlock(&inode->i_lock);
	} else
		nfs_unlock_request(req);
}
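
/*
 * Sketch of the tag-lock protocol (hypothetical helper, not in the
 * original file): nfs_set_page_tag_locked() is called with the inode's
 * i_lock held, as nfs_scan_list() below does, while
 * nfs_clear_page_tag_locked() takes i_lock itself and wakes anyone
 * sleeping in nfs_wait_on_request().
 */
#if 0
static struct nfs_page *nfs_example_try_lock(struct inode *inode,
					     struct nfs_page *req)
{
	spin_lock(&inode->i_lock);
	if (!nfs_set_page_tag_locked(req)) {
		spin_unlock(&inode->i_lock);
		return NULL;	/* busy: caller may nfs_wait_on_request() */
	}
	kref_get(&req->wb_kref);
	spin_unlock(&inode->i_lock);
	return req;
}
#endif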

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file or cached credential */
	nfs_clear_request(req);
	put_nfs_open_context(req->wb_context);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_killable(void *word)
{
	int ret = 0;

	if (fatal_signal_pending(current))
		ret = -ERESTARTSYS;
	else
		schedule();
	return ret;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	int ret = 0;

	if (!test_bit(PG_BUSY, &req->wb_flags))
		goto out;
	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_killable, TASK_KILLABLE);
out:
	return ret;
}
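
/*
 * Sketch (hypothetical helper, not in the original file): as the
 * comment above says, the waiter must hold its own count on the
 * request, since the owner may drop theirs at any time.
 */
#if 0
static int nfs_example_wait(struct nfs_page *req)
{
	int ret;

	kref_get(&req->wb_kref);
	ret = nfs_wait_on_request(req);
	nfs_release_request(req);
	return ret;
}
#endif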

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_context->lockowner != prev->wb_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}
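
/*
 * Sketch of how the descriptor is typically driven (hypothetical
 * helper, not in the original file): initialise it with an I/O
 * callback and block size, feed it requests, then flush whatever is
 * still coalesced.  nfs_pageio_add_request() moves each request onto
 * the descriptor's own list, so draining a list head like this is safe.
 */
#if 0
static void nfs_example_flush_list(struct inode *inode,
				   struct list_head *head,
				   int (*doio)(struct inode *,
					       struct list_head *,
					       unsigned int, size_t, int))
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_page *req;

	nfs_pageio_init(&pgio, inode, doio, NFS_SERVER(inode)->wsize, 0);
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		if (!nfs_pageio_add_request(&pgio, req))
			break;	/* pgio.pg_error records the failure */
	}
	nfs_pageio_complete(&pgio);
}
#endif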

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_doio(desc);
	}
}

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space starting at index
 * @idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
		struct list_head *dst, pgoff_t idx_start,
		unsigned int npages, int tag)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES, tag);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (nfs_set_page_tag_locked(req)) {
				kref_get(&req->wb_kref);
				nfs_list_remove_request(req);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
				nfs_list_add_request(req, dst);
				res++;
				if (res == INT_MAX)
					goto out;
			}
		}
		/* for latency reduction */
		cond_resched_lock(&nfsi->vfs_inode.i_lock);
	}
out:
	return res;
}
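
/*
 * Sketch of a caller (hypothetical helper, not in the original file):
 * nfs_scan_list() must run under the inode's i_lock, and the tag is
 * assumed to be one of the NFS_PAGE_TAG_* values from nfs_page.h.
 */
#if 0
static int nfs_example_scan_commit(struct inode *inode,
				   struct list_head *dst)
{
	int res;

	spin_lock(&inode->i_lock);
	res = nfs_scan_list(NFS_I(inode), dst, 0, 0, NFS_PAGE_TAG_COMMIT);
	spin_unlock(&inode->i_lock);
	return res;
}
#endif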

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}