/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p;
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* Initialize the request struct, taking a reference to the page
	 * and to the open and lock contexts. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	req->wb_lock_context = nfs_get_lock_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}

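/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller creates a request for a locked pagecache page and drops its
 * reference when done; 'ctx', 'inode' and 'page' are assumed to come
 * from the surrounding read or write path.
 *
 *	struct nfs_page *req;
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	nfs_release_request(req);
 */
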
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to lock and tag
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
	if (!nfs_lock_request_dontget(req))
		return 0;
	if (req->wb_page != NULL)
		radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree,
				req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
	if (req->wb_page != NULL) {
		struct inode *inode = req->wb_context->path.dentry->d_inode;
		struct nfs_inode *nfsi = NFS_I(inode);

		spin_lock(&inode->i_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index,
				NFS_PAGE_TAG_LOCKED);
		nfs_unlock_request(req);
		spin_unlock(&inode->i_lock);
	} else
		nfs_unlock_request(req);
}

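/*
 * Illustrative sketch only: the two helpers above bracket exclusive
 * ownership of a request. The caller is assumed to hold a reference
 * and, as in nfs_scan_list() below, inode->i_lock when taking the
 * tag lock.
 *
 *	if (nfs_set_page_tag_locked(req)) {
 *		... req is now owned by this thread ...
 *		nfs_clear_page_tag_locked(req);
 *	}
 */
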
/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible (TASK_UNINTERRUPTIBLE).
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

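/*
 * Illustrative sketch only (not part of the original file): the classic
 * "lock or wait" loop built from nfs_set_page_tag_locked() and
 * nfs_wait_on_request(). 'req' is assumed referenced and 'inode' is its
 * inode; i_lock must be dropped around the uninterruptible sleep.
 *
 *	spin_lock(&inode->i_lock);
 *	while (!nfs_set_page_tag_locked(req)) {
 *		spin_unlock(&inode->i_lock);
 *		nfs_wait_on_request(req);
 *		spin_lock(&inode->i_lock);
 *	}
 *	spin_unlock(&inode->i_lock);
 */
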
/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}

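/*
 * Illustrative sketch only: a hypothetical @doio callback and descriptor
 * set-up. The real callers pass the flush/read list functions from
 * fs/nfs/write.c and fs/nfs/read.c; 'my_doio' and 'wsize' here are
 * made-up placeholders.
 *
 *	static int my_doio(struct inode *inode, struct list_head *head,
 *			   unsigned int npages, size_t count, int how)
 *	{
 *		... turn the coalesced requests on 'head' into one RPC ...
 *		return 0;
 *	}
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, my_doio, wsize, 0);
 */
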
/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}

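/*
 * Worked example (added note): with PAGE_CACHE_SIZE == 4096, a 'prev'
 * covering all of page index 3 (wb_pgbase 0, wb_bytes 4096) coalesces
 * with a 'req' at index 4 with wb_pgbase 0; a hole at the end of 'prev'
 * (wb_pgbase + wb_bytes != 4096), a nonzero wb_pgbase in 'req', or a
 * jump to index 5 all fail the contiguity tests above.
 */
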
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_CACHE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was added to the list of pages 'desc';
 * if it cannot be coalesced with the existing pages, those are flushed
 * first. Returns false if such a flush fails (desc->pg_error is then set).
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}

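/*
 * Illustrative sketch only: the usual driver loop over a caller-built
 * list of requests ('head' and 'pgio' are assumed set up as above),
 * flushing whatever remains coalesced at the end.
 *
 *	while (!list_empty(head)) {
 *		struct nfs_page *req = nfs_list_entry(head->next);
 *
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;		(pgio.pg_error holds the error)
 *	}
 *	nfs_pageio_complete(&pgio);
 */
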
/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_doio(desc);
	}
}

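/*
 * Illustrative sketch only: a per-page writeback callback would call
 * this before waiting on the request that covers 'page', so that any
 * already-coalesced but non-contiguous run is flushed first:
 *
 *	nfs_pageio_cond_complete(pgio, page->index);
 *	(now safe to wait for / lock the nfs_page belonging to 'page')
 */
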
#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages - 1 is the upper bound of the scan
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space starting at index
 * idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
		struct list_head *dst, pgoff_t idx_start,
		unsigned int npages, int tag)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES, tag);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (nfs_set_page_tag_locked(req)) {
				kref_get(&req->wb_kref);
				nfs_list_remove_request(req);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
				nfs_list_add_request(req, dst);
				res++;
				if (res == INT_MAX)
					goto out;
			}
		}
		/* for latency reduction */
		cond_resched_lock(&nfsi->vfs_inode.i_lock);
	}
out:
	return res;
}

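/*
 * Illustrative sketch only, in the style of the commit path: gather
 * every request tagged NFS_PAGE_TAG_COMMIT, holding i_lock as required
 * above.
 *
 *	LIST_HEAD(head);
 *	int n;
 *
 *	spin_lock(&inode->i_lock);
 *	n = nfs_scan_list(NFS_I(inode), &head, 0, 0, NFS_PAGE_TAG_COMMIT);
 *	spin_unlock(&inode->i_lock);
 *	('n' requests are now tag-locked, referenced, and moved to 'head')
 */
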
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}