xref: /linux/fs/nfs/pagelist.c (revision 5d4a2e29fba5b2bef95b96a46b338ec4d76fa4fd)
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p;
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context (file state) to use
 * @inode: inode to which the request is attached
 * @page: page to read from or write to
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* Initialize the request struct and take a reference to the
	 * page so that it cannot be released while I/O is outstanding. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
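
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * nfs_create_request(), assuming the caller already holds the page lock and
 * an nfs_open_context, as the kernel-doc above requires. The function name
 * is hypothetical; guarded by "#if 0" so it is never compiled.
 */
#if 0
static int example_setup_request(struct nfs_open_context *ctx,
				 struct page *page, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;

	/* The page must be locked before a request is attached to it. */
	BUG_ON(!PageLocked(page));
	req = nfs_create_request(ctx, inode, page, 0, count);
	if (IS_ERR(req))
		return PTR_ERR(req);	/* -ENOMEM on allocation failure */
	/* ... hand the request to a pageio descriptor, or wait on it ... */
	nfs_release_request(req);	/* drop the kref taken at creation */
	return 0;
}
#endif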

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to the request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: pointer to the request to lock and tag
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
	if (!nfs_lock_request_dontget(req))
		return 0;
	if (req->wb_page != NULL)
		radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree,
				req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: pointer to the request to unlock
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
	if (req->wb_page != NULL) {
		struct inode *inode = req->wb_context->path.dentry->d_inode;
		struct nfs_inode *nfsi = NFS_I(inode);

		spin_lock(&inode->i_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree,
				req->wb_index, NFS_PAGE_TAG_LOCKED);
		nfs_unlock_request(req);
		spin_unlock(&inode->i_lock);
	} else
		nfs_unlock_request(req);
}
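
/*
 * Illustrative sketch (not part of the original file): the locking protocol
 * around the two helpers above. nfs_set_page_tag_locked() manipulates the
 * radix tree, so its in-tree callers (such as nfs_scan_list below) invoke it
 * with the inode's i_lock held and take their own reference on success;
 * nfs_clear_page_tag_locked() takes i_lock itself and drops that reference
 * via nfs_unlock_request(). The function name below is hypothetical; guarded
 * by "#if 0".
 */
#if 0
static void example_lock_and_unlock(struct inode *inode, struct nfs_page *req)
{
	spin_lock(&inode->i_lock);
	if (!nfs_set_page_tag_locked(req)) {
		/* someone else holds the request locked */
		spin_unlock(&inode->i_lock);
		return;
	}
	kref_get(&req->wb_kref);	/* hold the request across the unlock */
	spin_unlock(&inode->i_lock);

	/* ... operate on the request while it is tagged as locked ... */

	nfs_clear_page_tag_locked(req);	/* clears tag, wakes sleepers, drops ref */
}
#endif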

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to the request to clean up
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible (TASK_UNINTERRUPTIBLE).
 * The caller is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}
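
/*
 * Illustrative sketch (not part of the original file): waiting for a busy
 * request while holding our own count across the wait, as the kernel-doc
 * above requires. Hypothetical function name; guarded by "#if 0".
 */
#if 0
static void example_wait(struct nfs_page *req)
{
	kref_get(&req->wb_kref);	/* hold a count across the wait */
	nfs_wait_on_request(req);	/* sleeps until PG_BUSY is cleared */
	nfs_release_request(req);
}
#endif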

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra flags to pass to the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}
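
/*
 * Illustrative sketch (not part of the original file): initialising a
 * descriptor with a ->pg_doio callback matching the signature above. In the
 * real tree the read and write paths pass their own flush/pagein helpers;
 * "example_doio", "example_pageio_setup" and "wsize" here are hypothetical
 * stand-ins. Guarded by "#if 0".
 */
#if 0
static int example_doio(struct inode *inode, struct list_head *head,
			unsigned int npages, size_t count, int how)
{
	/* issue one RPC covering 'count' bytes spread over 'npages' pages */
	return 0;
}

static void example_pageio_setup(struct nfs_pageio_descriptor *desc,
				 struct inode *inode, size_t wsize)
{
	nfs_pageio_init(desc, inode, example_doio, wsize, 0);
}
#endif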

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_context->lockowner != prev->wb_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}
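
/*
 * Worked example (added for illustration): with PAGE_CACHE_SIZE == 4096, a
 * request covering all of page index 5 (wb_pgbase 0, wb_bytes 4096) followed
 * by a request starting at byte 0 of page index 6 passes every test above
 * and coalesces, provided the two share credentials, open state and lock
 * owner. A request for page index 7, or one starting mid-page, does not.
 */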

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'. If coalescing fails, the pending list is
 * flushed with nfs_pageio_doio() and the add is retried, so this returns
 * false only after an I/O error.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}
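
/*
 * Illustrative sketch (not part of the original file): the usual calling
 * pattern for the descriptor API - add requests one by one, then flush
 * whatever remains. nfs_pageio_add_request() unlinks each request from its
 * current list itself, so draining a list this way is safe. The function
 * name "example_flush_list" is hypothetical. Guarded by "#if 0".
 */
#if 0
static int example_flush_list(struct nfs_pageio_descriptor *desc,
			      struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		if (!nfs_pageio_add_request(desc, req))
			return desc->pg_error;	/* I/O error while flushing */
	}
	nfs_pageio_complete(desc);	/* send any remaining requests */
	return 0;
}
#endif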

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_doio(desc);
	}
}
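
/*
 * Illustrative sketch (not part of the original file): flushing the
 * descriptor before sleeping on a locked request at a possibly
 * non-contiguous index, per the deadlock-avoidance rule documented above.
 * Hypothetical function name; guarded by "#if 0".
 */
#if 0
static int example_wait_for_page(struct nfs_pageio_descriptor *desc,
				 struct nfs_page *req, pgoff_t index)
{
	nfs_pageio_cond_complete(desc, index);
	return nfs_wait_on_request(req);
}
#endif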

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space starting at index
 * idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
		struct list_head *dst, pgoff_t idx_start,
		unsigned int npages, int tag)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES, tag);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (nfs_set_page_tag_locked(req)) {
				kref_get(&req->wb_kref);
				nfs_list_remove_request(req);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
				nfs_list_add_request(req, dst);
				res++;
				if (res == INT_MAX)
					goto out;
			}
		}
		/* for latency reduction */
		cond_resched_lock(&nfsi->vfs_inode.i_lock);
	}
out:
	return res;
}
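
/*
 * Illustrative sketch (not part of the original file): a caller of
 * nfs_scan_list() obeying its locking rule - i_lock held, and npages == 0 to
 * scan to the end of the address space. Hypothetical function name; guarded
 * by "#if 0".
 */
#if 0
static int example_scan_all(struct inode *inode, struct list_head *dst,
			    int tag)
{
	int res;

	spin_lock(&inode->i_lock);
	res = nfs_scan_list(NFS_I(inode), dst, 0, 0, tag);
	spin_unlock(&inode->i_lock);
	return res;	/* number of requests moved to 'dst' */
}
#endif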

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}
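
/*
 * Illustrative sketch (not part of the original file): how the two helpers
 * above pair up in module init and exit paths. The real callers live
 * elsewhere in the NFS client; the names below are hypothetical stand-ins.
 * Guarded by "#if 0".
 */
#if 0
static int __init example_init(void)
{
	int err = nfs_init_nfspagecache();

	if (err)
		return err;
	/* ... register the rest of the client ... */
	return 0;
}

static void __exit example_exit(void)
{
	nfs_destroy_nfspagecache();	/* must mirror the init path */
}
#endif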