xref: /linux/fs/afs/file.c (revision 54b0a2011dfcd2e3fe2c28062694fbbe3eb377a3)
1 /* AFS filesystem file handling
2  *
3  * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/fs.h>
16 #include <linux/pagemap.h>
17 #include <linux/writeback.h>
18 #include <linux/gfp.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include "internal.h"
21 
22 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
23 static int afs_readpage(struct file *file, struct page *page);
24 static void afs_invalidatepage(struct page *page, unsigned int offset,
25 			       unsigned int length);
26 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
27 
28 static int afs_readpages(struct file *filp, struct address_space *mapping,
29 			 struct list_head *pages, unsigned nr_pages);
30 
/*
 * Operations for regular AFS files: generic page-cache-based reads and
 * seeks, with AFS-specific open/release, writes, locking and fsync.
 */
const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= afs_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};
43 
/*
 * Inode operations for regular AFS files.
 */
const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
	.listxattr	= afs_listxattr,
};
50 
/*
 * Address-space operations for AFS files: reads are filled from the
 * cache/server by afs_readpage{,s}(), writes go through the AFS
 * write_begin/write_end/writepage(s) paths.
 */
const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};
63 
/*
 * VM operations for mmapped AFS files: generic fault handling plus an
 * AFS-specific page_mkwrite to track dirty state.
 */
static const struct vm_operations_struct afs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= afs_page_mkwrite,
};
69 
70 /*
71  * Discard a pin on a writeback key.
72  */
73 void afs_put_wb_key(struct afs_wb_key *wbk)
74 {
75 	if (refcount_dec_and_test(&wbk->usage)) {
76 		key_put(wbk->key);
77 		kfree(wbk);
78 	}
79 }
80 
/*
 * Cache key for writeback.
 *
 * Pin the key held by @af on @vnode's writeback key list so that later
 * writeback can be performed with the right credentials.  If a record for
 * the same key is already on the list, that record is pinned instead and
 * the freshly allocated one is discarded.
 *
 * Returns 0 on success or -ENOMEM if the record can't be allocated.
 */
int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af)
{
	struct afs_wb_key *wbk, *p;

	wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL);
	if (!wbk)
		return -ENOMEM;
	/* Two refs: one for the vnode's list, one for af->wb. */
	refcount_set(&wbk->usage, 2);
	wbk->key = af->key;

	spin_lock(&vnode->wb_lock);
	/* See if this key is already pinned on the list. */
	list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
		if (p->key == wbk->key)
			goto found;
	}

	/* The list holds its own ref on the key. */
	key_get(wbk->key);
	list_add_tail(&wbk->vnode_link, &vnode->wb_keys);
	spin_unlock(&vnode->wb_lock);
	af->wb = wbk;
	return 0;

found:
	/* Reuse the existing record; throw away the one we allocated. */
	refcount_inc(&p->usage);
	spin_unlock(&vnode->wb_lock);
	af->wb = p;
	kfree(wbk);
	return 0;
}
113 
/*
 * open an AFS file or directory and attach a key to it
 *
 * Looks up the caller's credentials for the cell, allocates a per-file
 * record (stored in file->private_data) carrying the key, revalidates the
 * vnode against the server and, for writable opens, pins the key for
 * later writeback.  Returns 0 or a negative error.
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af;
	struct key *key;
	int ret;

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	/* Get the caller's key for this cell. */
	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error;
	}

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		ret = -ENOMEM;
		goto error_key;
	}
	af->key = key;

	/* Make sure our view of the vnode is current. */
	ret = afs_validate(vnode, key);
	if (ret < 0)
		goto error_af;

	/* Writable opens pin the key on the vnode's writeback list so
	 * writeback has credentials to use later.
	 */
	if (file->f_mode & FMODE_WRITE) {
		ret = afs_cache_wb_key(vnode, af);
		if (ret < 0)
			goto error_af;
	}

	if (file->f_flags & O_TRUNC)
		set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);

	file->private_data = af;
	_leave(" = 0");
	return 0;

error_af:
	kfree(af);
error_key:
	key_put(key);
error:
	_leave(" = %d", ret);
	return ret;
}
164 
165 /*
166  * release an AFS file or directory and discard its key
167  */
168 int afs_release(struct inode *inode, struct file *file)
169 {
170 	struct afs_vnode *vnode = AFS_FS_I(inode);
171 	struct afs_file *af = file->private_data;
172 
173 	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
174 
175 	if ((file->f_mode & FMODE_WRITE))
176 		return vfs_fsync(file, 0);
177 
178 	file->private_data = NULL;
179 	if (af->wb)
180 		afs_put_wb_key(af->wb);
181 	key_put(af->key);
182 	kfree(af);
183 	afs_prune_wb_keys(vnode);
184 	_leave(" = 0");
185 	return 0;
186 }
187 
188 /*
189  * Dispose of a ref to a read record.
190  */
191 void afs_put_read(struct afs_read *req)
192 {
193 	int i;
194 
195 	if (refcount_dec_and_test(&req->usage)) {
196 		for (i = 0; i < req->nr_pages; i++)
197 			if (req->pages[i])
198 				put_page(req->pages[i]);
199 		if (req->pages != req->array)
200 			kfree(req->pages);
201 		kfree(req);
202 	}
203 }
204 
#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 *
 * fscache completion callback: mark the page up to date on success and
 * unlock it either way.
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif
222 
/*
 * Fetch file data from the volume.
 *
 * Runs an FS.FetchData operation through the fileserver rotation cursor,
 * retrying on other servers as the cursor dictates, then commits the
 * returned vnode status.  On success the fetch is added to the stats.
 *
 * Returns 0 on success, -ERESTARTSYS if the operation could not be
 * started, or another negative error from the operation.
 */
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
{
	struct afs_fs_cursor fc;
	int ret;

	_enter("%s{%x:%u.%u},%x,,,",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, key)) {
		while (afs_select_fileserver(&fc)) {
			/* Snapshot the callback-break counter so a callback
			 * loss during the fetch can be detected on commit.
			 */
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_fetch_data(&fc, desc);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	if (ret == 0) {
		/* Account the successful fetch and byte count. */
		afs_stat_v(vnode, n_fetches);
		atomic_long_add(desc->actual_len,
				&afs_v2net(vnode)->n_fetch_bytes);
	}

	_leave(" = %d", ret);
	return ret;
}
259 
/*
 * read page from file, directory or symlink, given a key to use
 *
 * @data is the key to use for the fetch; the page must be locked on entry
 * and is unlocked on all paths out.  The local cache is tried first (when
 * CONFIG_AFS_FSCACHE); otherwise the page is fetched from the server and,
 * on success, offered back to the cache.
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");
		/* fall through to the fetch-from-server path */
	default:
	go_on:
		/* Single-page read request: one page pointer in the
		 * trailing array. */
		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
			      GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page.  If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
		 */
		refcount_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages = req->array;
		req->pages[0] = page;
		get_page(page);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_fetch_data(vnode, key, req);
		afs_put_read(req);

		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

			/* Transient errors are reported without marking the
			 * page as failed so the read can be retried. */
			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, vnode->status.size,
				       GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}
374 
375 /*
376  * read page from file, directory or symlink, given a file to nominate the key
377  * to be used
378  */
379 static int afs_readpage(struct file *file, struct page *page)
380 {
381 	struct key *key;
382 	int ret;
383 
384 	if (file) {
385 		key = afs_file_key(file);
386 		ASSERT(key != NULL);
387 		ret = afs_page_filler(key, page);
388 	} else {
389 		struct inode *inode = page->mapping->host;
390 		key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
391 		if (IS_ERR(key)) {
392 			ret = PTR_ERR(key);
393 		} else {
394 			ret = afs_page_filler(key, page);
395 			key_put(key);
396 		}
397 	}
398 	return ret;
399 }
400 
/*
 * Make pages available as they're filled.
 *
 * Called per page as the fetch reply fills it: the slot in the request is
 * cleared (so afs_put_read() won't put the page again), the page is marked
 * up to date, optionally written to the cache, then unlocked and released.
 */
static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	/* Only needed for the cache write below. */
	struct afs_vnode *vnode = call->reply[0];
#endif
	struct page *page = req->pages[req->index];

	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, vnode->status.size,
			       GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}
426 
/*
 * Read a contiguous set of pages.
 *
 * Takes the leading run of index-contiguous pages off @pages, adds them to
 * the page cache and fetches them from the server in one request.  Pages
 * are unlocked/marked as the reply fills them via afs_readpages_page_done.
 *
 * Returns 0 on success (or if no page could be added to the cache), or a
 * negative error, in which case the remaining pages are errored/unlocked.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = afs_file_key(file);
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list.  Note
	 * that the list goes prev-wards rather than next-wards.
	 */
	first = list_entry(pages->prev, struct page, lru);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

	/* Request record with an n-slot trailing page array. */
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
		      GFP_NOFS);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->page_done = afs_readpages_page_done;
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;
	req->pages = req->array;

	/* Transfer the pages to the request.  We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run.
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank.  We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

	/* Nothing made it into the page cache - nothing to fetch. */
	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	/* Error and unlock any pages not consumed by the reply handler. */
	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}
527 
/*
 * read a set of pages
 *
 * The .readpages handler: try the local cache first; whatever it can't
 * supply is fetched from the server in contiguous runs.  Returns 0 or a
 * negative error (-ESTALE if the vnode was deleted on the server).
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = afs_file_key(file);
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

	/* Fetch the remaining pages from the server, one contiguous run per
	 * call, until the list is drained or an error occurs.
	 */
	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}
590 
591 /*
592  * invalidate part or all of a page
593  * - release a page and clean up its private data if offset is 0 (indicating
594  *   the entire page)
595  */
596 static void afs_invalidatepage(struct page *page, unsigned int offset,
597 			       unsigned int length)
598 {
599 	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
600 	unsigned long priv;
601 
602 	_enter("{%lu},%u,%u", page->index, offset, length);
603 
604 	BUG_ON(!PageLocked(page));
605 
606 	/* we clean up only if the entire page is being invalidated */
607 	if (offset == 0 && length == PAGE_SIZE) {
608 #ifdef CONFIG_AFS_FSCACHE
609 		if (PageFsCache(page)) {
610 			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
611 			fscache_wait_on_page_write(vnode->cache, page);
612 			fscache_uncache_page(vnode->cache, page);
613 		}
614 #endif
615 
616 		if (PagePrivate(page)) {
617 			priv = page_private(page);
618 			trace_afs_page_dirty(vnode, tracepoint_string("inval"),
619 					     page->index, priv);
620 			set_page_private(page, 0);
621 			ClearPagePrivate(page);
622 		}
623 	}
624 
625 	_leave("");
626 }
627 
/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
	unsigned long priv;

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	/* Clear the private dirty-tracking word before the page goes. */
	if (PagePrivate(page)) {
		priv = page_private(page);
		trace_afs_page_dirty(vnode, tracepoint_string("rel"),
				     page->index, priv);
		set_page_private(page, 0);
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}
662 
663 /*
664  * Handle setting up a memory mapping on an AFS file.
665  */
666 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
667 {
668 	int ret;
669 
670 	ret = generic_file_mmap(file, vma);
671 	if (ret == 0)
672 		vma->vm_ops = &afs_vm_ops;
673 	return ret;
674 }
675