/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/writeback.h>

#define NFS_PARANOIA 1

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p;
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page, and avoids
 * a possible deadlock when we reach the hard limit on the number
 * of dirty pages.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_page *req;

	/* Deal with hard limits. */
	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		/* Try to free up at least one request in order to stay
		 * below the hard limit
		 */
		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * nfs_update_request() if the region is not locked. */
	req->wb_page	= page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset	= offset;
	req->wb_pgbase	= offset;
	req->wb_bytes	= count;
	atomic_set(&req->wb_count, 1);
	req->wb_context	= get_nfs_open_context(ctx);

	return req;
}
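/*
 * Illustrative sketch: a caller such as the write path in
 * fs/nfs/write.c might pair nfs_create_request() with
 * nfs_release_request() roughly as follows ("ctx", "inode" and "page"
 * are assumed to be supplied by the caller, with the page locked):
 *
 *	struct nfs_page *req;
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... queue the request for I/O ...
 *	nfs_release_request(req);	(drops the initial reference)
 */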
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req: pointer to request
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (!nfs_lock_request(req))
		return 0;
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
	return 1;
}

/**
 * nfs_clear_page_writeback - Unlock request and wake up sleepers
 * @req: pointer to request
 */
void nfs_clear_page_writeback(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (req->wb_page != NULL) {
		spin_lock(&nfsi->req_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
		spin_unlock(&nfsi->req_lock);
	}
	nfs_unlock_request(req);
}

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to request
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
	if (!atomic_dec_and_test(&req->wb_count))
		return;

#ifdef NFS_PARANOIA
	BUG_ON(!list_empty(&req->wb_list));
	BUG_ON(NFS_WBACK_BUSY(req));
#endif

	/* Release struct file or cached credential */
	nfs_clear_request(req);
	put_nfs_open_context(req->wb_context);
	nfs_page_free(req);
}

static int nfs_wait_bit_interruptible(void *word)
{
	int ret = 0;

	if (signal_pending(current))
		ret = -ERESTARTSYS;
	else
		schedule();
	return ret;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
	sigset_t oldmask;
	int ret = 0;

	if (!test_bit(PG_BUSY, &req->wb_flags))
		goto out;
	/*
	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
	 *	 are not interrupted if intr flag is not set
	 */
	rpc_clnt_sigmask(clnt, &oldmask);
	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
	rpc_clnt_sigunmask(clnt, &oldmask);
out:
	return ret;
}
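/*
 * Illustrative sketch: a caller that finds a request already locked
 * for I/O typically waits for it and retries, along these lines
 * ("req" is assumed to have been looked up with its wb_count held):
 *
 *	while (!nfs_lock_request(req)) {
 *		int error = nfs_wait_on_request(req);
 *		if (error < 0)
 *			return error;
 *	}
 *	... the request is now locked for exclusive use ...
 */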
/**
 * nfs_coalesce_requests - Split coalesced requests out from a list.
 * @head: source list
 * @dst: destination list
 * @nmax: maximum number of requests to coalesce
 *
 * Moves a maximum of 'nmax' elements from one list to another.
 * The elements are checked to ensure that they form a contiguous set
 * of pages, and that the RPC credentials are the same.
 */
int
nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
		      unsigned int nmax)
{
	struct nfs_page *req = NULL;
	unsigned int npages = 0;

	while (!list_empty(head)) {
		struct nfs_page *prev = req;

		req = nfs_list_entry(head->next);
		if (prev) {
			if (req->wb_context->cred != prev->wb_context->cred)
				break;
			if (req->wb_context->lockowner != prev->wb_context->lockowner)
				break;
			if (req->wb_context->state != prev->wb_context->state)
				break;
			if (req->wb_index != (prev->wb_index + 1))
				break;

			if (req->wb_pgbase != 0)
				break;
		}
		nfs_list_remove_request(req);
		nfs_list_add_request(req, dst);
		npages++;
		if (req->wb_pgbase + req->wb_bytes != PAGE_CACHE_SIZE)
			break;
		if (npages >= nmax)
			break;
	}
	return npages;
}

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_dirty - Scan the radix tree for dirty requests
 * @mapping: pointer to address space
 * @wbc: writeback_control structure
 * @dst: Destination list
 *
 * Moves elements from one of the inode request lists.
 * The scan range comes from @wbc: if @wbc->range_cyclic is set, the
 * entire address_space is scanned; otherwise the scan runs from
 * @wbc->range_start to @wbc->range_end.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
long nfs_scan_dirty(struct address_space *mapping,
			struct writeback_control *wbc,
			struct list_head *dst)
{
	struct nfs_inode *nfsi = NFS_I(mapping->host);
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_start, idx_end;
	long res = 0;
	int found, i;

	if (nfsi->ndirty == 0)
		return 0;
	if (wbc->range_cyclic) {
		idx_start = 0;
		idx_end = ULONG_MAX;
	} else if (wbc->range_end == 0) {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = ULONG_MAX;
	} else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
	}

	for (;;) {
		unsigned int toscan = NFS_SCAN_MAXENTRIES;

		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start, toscan,
				NFS_PAGE_TAG_DIRTY);

		/* Did we make progress? */
		if (found <= 0)
			break;

		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (!wbc->range_cyclic && req->wb_index > idx_end)
				goto out;

			/* Try to lock request and mark it for writeback */
			if (!nfs_set_page_writeback_locked(req))
				goto next;
			radix_tree_tag_clear(&nfsi->nfs_page_tree,
					req->wb_index, NFS_PAGE_TAG_DIRTY);
			nfsi->ndirty--;
			nfs_list_remove_request(req);
			nfs_list_add_request(req, dst);
			res++;
			if (res == LONG_MAX)
				goto out;
next:
			idx_start = req->wb_index + 1;
		}
	}
out:
	WARN_ON((nfsi->ndirty == 0) != list_empty(&nfsi->dirty));
	return res;
}
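/*
 * Illustrative sketch: a flush routine might combine nfs_scan_dirty()
 * and nfs_coalesce_requests() roughly as follows ("nfsi", "mapping",
 * "wbc" and the per-RPC page limit "wpages" are assumed to come from
 * the caller):
 *
 *	LIST_HEAD(head);
 *	LIST_HEAD(one_request);
 *
 *	spin_lock(&nfsi->req_lock);
 *	nfs_scan_dirty(mapping, wbc, &head);
 *	spin_unlock(&nfsi->req_lock);
 *	while (!list_empty(&head)) {
 *		nfs_coalesce_requests(&head, &one_request, wpages);
 *		... issue one WRITE RPC covering "one_request" ...
 *	}
 */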
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @head: One of the NFS inode request lists
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space starting at index
 * @idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
		struct list_head *dst, unsigned long idx_start,
		unsigned int npages)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	unsigned long idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (req->wb_list_head != head)
				continue;
			if (nfs_set_page_writeback_locked(req)) {
				nfs_list_remove_request(req);
				nfs_list_add_request(req, dst);
				res++;
			}
		}

	}
out:
	return res;
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL, NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}
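/*
 * Illustrative sketch: the slab cache above is created and destroyed
 * from the NFS module's init and exit paths (init_nfs_fs() and
 * exit_nfs_fs() in fs/nfs/inode.c), conceptually:
 *
 *	err = nfs_init_nfspagecache();
 *	if (err)
 *		goto out;
 *	...
 *	nfs_destroy_nfspagecache();
 */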