xref: /linux/fs/nfs/pagelist.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p;
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context (and credential) to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_page		*req;

	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
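
/*
 * Example (illustrative sketch, not part of this file): creating a
 * request for a locked page and dropping it again. The surrounding
 * function, its arguments and its error handling are hypothetical.
 *
 *	struct nfs_page *req;
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	nfs_release_request(req);
 */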

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to lock and tag
 */
static int nfs_set_page_tag_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);

	if (!nfs_lock_request(req))
		return 0;
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	if (req->wb_page != NULL) {
		spin_lock(&inode->i_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
		spin_unlock(&inode->i_lock);
	}
	nfs_unlock_request(req);
}
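
/*
 * Example (illustrative sketch): the tag-lock life cycle. A request is
 * locked and tagged under the inode's i_lock (as nfs_scan_list() does
 * below); once the caller has dropped i_lock and finished with the
 * request, nfs_clear_page_tag_locked() clears the tag, unlocks the
 * request and wakes any waiters:
 *
 *	spin_lock(&inode->i_lock);
 *	locked = nfs_set_page_tag_locked(req);
 *	spin_unlock(&inode->i_lock);
 *	if (locked)
 *		nfs_clear_page_tag_locked(req);
 */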

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
}

static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file or cached credential */
	nfs_clear_request(req);
	put_nfs_open_context(req->wb_context);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}
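
/*
 * Example (illustrative sketch): holding an extra reference across a
 * blocking operation. nfs_create_request() returns with one reference
 * held; additional references are taken with kref_get(&req->wb_kref)
 * and every reference is dropped with nfs_release_request().
 *
 *	kref_get(&req->wb_kref);
 *	nfs_example_block(req);		(hypothetical blocking helper)
 *	nfs_release_request(req);
 */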

static int nfs_wait_bit_interruptible(void *word)
{
	int ret = 0;

	if (signal_pending(current))
		ret = -ERESTARTSYS;
	else
		schedule();
	return ret;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
	sigset_t oldmask;
	int ret = 0;

	if (!test_bit(PG_BUSY, &req->wb_flags))
		goto out;
	/*
	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
	 *	 are not interrupted if intr flag is not set
	 */
	rpc_clnt_sigmask(clnt, &oldmask);
	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
	rpc_clnt_sigunmask(clnt, &oldmask);
out:
	return ret;
}
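
/*
 * Example (illustrative sketch): waiting for an in-flight request while
 * holding a reference, then dropping it. The error handling around the
 * call is hypothetical.
 *
 *	int error;
 *
 *	kref_get(&req->wb_kref);
 *	error = nfs_wait_on_request(req);
 *	nfs_release_request(req);
 *	if (error < 0)
 *		return error;
 */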

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}
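
/*
 * Example (illustrative sketch): initialising a descriptor for reads.
 * nfs_example_doio is a hypothetical I/O function with the signature
 * expected by @doio; real callers pass the server's rsize or wsize as
 * the block size.
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, nfs_example_doio,
 *			NFS_SERVER(inode)->rsize, 0);
 */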

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_context->lockowner != prev->wb_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}
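
/*
 * Worked example (illustrative): with PAGE_CACHE_SIZE == 4096, a
 * request covering bytes [0, 4096) of page index 5 and one covering
 * bytes [0, 2048) of page index 6 coalesce, provided the credential,
 * open state and lockowner match: 'prev' ends exactly on a page
 * boundary and 'req' starts at pgbase 0 of the next index. A request
 * starting at pgbase 2048, or one for page index 7, would not.
 */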

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request cannot be coalesced into the existing list, that list
 * is flushed first and the attempt is retried.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}
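
/*
 * Example (illustrative sketch): the typical descriptor life cycle.
 * The request list 'head' and the descriptor setup are hypothetical;
 * a return of 0 from nfs_pageio_add_request() means pg_error is set.
 *
 *	while (!list_empty(head)) {
 *		struct nfs_page *req = nfs_list_entry(head->next);
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			return pgio.pg_error;
 *	}
 *	nfs_pageio_complete(&pgio);
 */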

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_doio(desc);
	}
}
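
/*
 * Example (illustrative sketch): flushing any queued pages before
 * waiting on a locked page that is not contiguous with them, so that
 * two processes cannot deadlock on each other's locked ranges. The
 * surrounding error handling is hypothetical.
 *
 *	nfs_pageio_cond_complete(&pgio, page->index);
 *	error = nfs_wait_on_request(req);
 */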

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space starting at index
 * @idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
		struct list_head *dst, pgoff_t idx_start,
		unsigned int npages, int tag)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES, tag);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (nfs_set_page_tag_locked(req)) {
				nfs_list_remove_request(req);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
				nfs_list_add_request(req, dst);
				res++;
				if (res == INT_MAX)
					goto out;
			}
		}
		/* for latency reduction */
		cond_resched_lock(&nfsi->vfs_inode.i_lock);
	}
out:
	return res;
}
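
/*
 * Example (illustrative sketch): gathering tagged requests under the
 * inode spinlock. The choice of NFS_PAGE_TAG_COMMIT and the scan of
 * the whole file (npages == 0) are hypothetical.
 *
 *	LIST_HEAD(head);
 *	int count;
 *
 *	spin_lock(&inode->i_lock);
 *	count = nfs_scan_list(NFS_I(inode), &head, 0, 0,
 *			      NFS_PAGE_TAG_COMMIT);
 *	spin_unlock(&inode->i_lock);
 */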

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}
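
/*
 * Example (illustrative sketch): pairing the cache constructor and
 * destructor from a module init/exit path; the function names here
 * are hypothetical.
 *
 *	static int __init example_init(void)
 *	{
 *		return nfs_init_nfspagecache();
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		nfs_destroy_nfspagecache();
 *	}
 */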

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}