/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static int afs_launder_page(struct page *page);

static int afs_readpages(struct file *filp, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages);

const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.flush		= afs_flush,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
	.listxattr	= afs_listxattr,
};

const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct key *key;
	int ret;

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		return PTR_ERR(key);
	}

	ret = afs_validate(vnode, key);
	if (ret < 0) {
		key_put(key);
		_leave(" = %d [val]", ret);
		return ret;
	}

	file->private_data = key;
	_leave(" = 0");
	return 0;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	key_put(file->private_data);
	_leave(" = 0");
	return 0;
}

/*
 * Dispose of a ref to a read record.
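 *
 * When the last reference is dropped, any pages still attached to the request
 * are released and the request itself is freed.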
 */
void afs_put_read(struct afs_read *req)
{
	int i;

	if (atomic_dec_and_test(&req->usage)) {
		for (i = 0; i < req->nr_pages; i++)
			if (req->pages[i])
				put_page(req->pages[i]);
		kfree(req);
	}
}

#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif

/*
 * read page from file, directory or symlink, given a key to use
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");
	default:
	go_on:
		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
			      GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page.  If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
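		 * This way the page is fully initialised even when it extends
		 * beyond the EOF.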
		 */
		atomic_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages[0] = page;
		get_page(page);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, req);
		afs_put_read(req);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * read page from file, directory or symlink, given a file to nominate the key
 * to be used
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct key *key;
	int ret;

	if (file) {
		key = file->private_data;
		ASSERT(key != NULL);
		ret = afs_page_filler(key, page);
	} else {
		struct inode *inode = page->mapping->host;
		key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
		if (IS_ERR(key)) {
			ret = PTR_ERR(key);
		} else {
			ret = afs_page_filler(key, page);
			key_put(key);
		}
	}
	return ret;
}

/*
 * Make pages available as they're filled.
 */
static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = call->reply;
#endif
	struct page *page = req->pages[req->index];

	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}

/*
 * Read a contiguous set of pages.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = file->private_data;
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list.  Note
	 * that the list goes prev-wards rather than next-wards.
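	 * The first page to be read is therefore at the tail of the list
	 * (pages->prev) and each successive page is found by stepping to the
	 * preceding element.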
	 */
	first = list_entry(pages->prev, struct page, lru);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
		      GFP_NOFS);
	if (!req)
		return -ENOMEM;

	atomic_set(&req->usage, 1);
	req->page_done = afs_readpages_page_done;
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;

	/* Transfer the pages to the request.  We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run).
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank.  We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_vnode_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}

/*
 * read a set of pages
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = file->private_data;
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}

/*
 * write back a dirty page
 */
static int afs_launder_page(struct page *page)
{
	_enter("{%lu}", page->index);

	return 0;
}

/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);

	_enter("{%lu},%u,%u", page->index, offset, length);

	BUG_ON(!PageLocked(page));

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == PAGE_SIZE) {
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
		}
#endif

		if (PagePrivate(page)) {
			if (wb && !PageWriteback(page)) {
				set_page_private(page, 0);
				afs_put_writeback(wb);
			}

			if (!page_private(page))
				ClearPagePrivate(page);
		}
	}

	_leave("");
}

/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	if (PagePrivate(page)) {
		if (wb) {
			set_page_private(page, 0);
			afs_put_writeback(wb);
		}
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}