1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/backing-dev.h> 4 #include <linux/fs.h> 5 #include <linux/mm.h> 6 #include <linux/pagemap.h> 7 #include <linux/writeback.h> /* generic_writepages */ 8 #include <linux/slab.h> 9 #include <linux/pagevec.h> 10 #include <linux/task_io_accounting_ops.h> 11 12 #include "super.h" 13 #include "mds_client.h" 14 #include "cache.h" 15 #include <linux/ceph/osd_client.h> 16 17 /* 18 * Ceph address space ops. 19 * 20 * There are a few funny things going on here. 21 * 22 * The page->private field is used to reference a struct 23 * ceph_snap_context for _every_ dirty page. This indicates which 24 * snapshot the page was logically dirtied in, and thus which snap 25 * context needs to be associated with the osd write during writeback. 26 * 27 * Similarly, struct ceph_inode_info maintains a set of counters to 28 * count dirty pages on the inode. In the absence of snapshots, 29 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. 30 * 31 * When a snapshot is taken (that is, when the client receives 32 * notification that a snapshot was taken), each inode with caps and 33 * with dirty pages (dirty pages implies there is a cap) gets a new 34 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending 35 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is 36 * moved to capsnap->dirty. (Unless a sync write is currently in 37 * progress. In that case, the capsnap is said to be "pending", new 38 * writes cannot start, and the capsnap isn't "finalized" until the 39 * write completes (or fails) and a final size/mtime for the inode for 40 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. 41 * 42 * On writeback, we must submit writes to the osd IN SNAP ORDER. So, 43 * we look for the first capsnap in i_cap_snaps and write out pages in 44 * that snap context _only_. Then we move on to the next capsnap, 45 * eventually reaching the "live" or "head" context (i.e., pages that 46 * are not yet snapped) and are writing the most recently dirtied 47 * pages. 48 * 49 * Invalidate and so forth must take care to ensure the dirty page 50 * accounting is preserved. 51 */ 52 53 #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) 54 #define CONGESTION_OFF_THRESH(congestion_kb) \ 55 (CONGESTION_ON_THRESH(congestion_kb) - \ 56 (CONGESTION_ON_THRESH(congestion_kb) >> 2)) 57 58 static inline struct ceph_snap_context *page_snap_context(struct page *page) 59 { 60 if (PagePrivate(page)) 61 return (void *)page->private; 62 return NULL; 63 } 64 65 /* 66 * Dirty a page. Optimistically adjust accounting, on the assumption 67 * that we won't race with invalidate. If we do, readjust. 68 */ 69 static int ceph_set_page_dirty(struct page *page) 70 { 71 struct address_space *mapping = page->mapping; 72 struct inode *inode; 73 struct ceph_inode_info *ci; 74 struct ceph_snap_context *snapc; 75 int ret; 76 77 if (unlikely(!mapping)) 78 return !TestSetPageDirty(page); 79 80 if (PageDirty(page)) { 81 dout("%p set_page_dirty %p idx %lu -- already dirty\n", 82 mapping->host, page, page->index); 83 BUG_ON(!PagePrivate(page)); 84 return 0; 85 } 86 87 inode = mapping->host; 88 ci = ceph_inode(inode); 89 90 /* dirty the head */ 91 spin_lock(&ci->i_ceph_lock); 92 BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference 93 if (__ceph_have_pending_cap_snap(ci)) { 94 struct ceph_cap_snap *capsnap = 95 list_last_entry(&ci->i_cap_snaps, 96 struct ceph_cap_snap, 97 ci_item); 98 snapc = ceph_get_snap_context(capsnap->context); 99 capsnap->dirty_pages++; 100 } else { 101 BUG_ON(!ci->i_head_snapc); 102 snapc = ceph_get_snap_context(ci->i_head_snapc); 103 ++ci->i_wrbuffer_ref_head; 104 } 105 if (ci->i_wrbuffer_ref == 0) 106 ihold(inode); 107 ++ci->i_wrbuffer_ref; 108 dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " 109 "snapc %p seq %lld (%d snaps)\n", 110 mapping->host, page, page->index, 111 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 112 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 113 snapc, snapc->seq, snapc->num_snaps); 114 spin_unlock(&ci->i_ceph_lock); 115 116 /* 117 * Reference snap context in page->private. Also set 118 * PagePrivate so that we get invalidatepage callback. 119 */ 120 BUG_ON(PagePrivate(page)); 121 page->private = (unsigned long)snapc; 122 SetPagePrivate(page); 123 124 ret = __set_page_dirty_nobuffers(page); 125 WARN_ON(!PageLocked(page)); 126 WARN_ON(!page->mapping); 127 128 return ret; 129 } 130 131 /* 132 * If we are truncating the full page (i.e. offset == 0), adjust the 133 * dirty page counters appropriately. Only called if there is private 134 * data on the page. 135 */ 136 static void ceph_invalidatepage(struct page *page, unsigned int offset, 137 unsigned int length) 138 { 139 struct inode *inode; 140 struct ceph_inode_info *ci; 141 struct ceph_snap_context *snapc = page_snap_context(page); 142 143 inode = page->mapping->host; 144 ci = ceph_inode(inode); 145 146 if (offset != 0 || length != PAGE_SIZE) { 147 dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", 148 inode, page, page->index, offset, length); 149 return; 150 } 151 152 ceph_invalidate_fscache_page(inode, page); 153 154 if (!PagePrivate(page)) 155 return; 156 157 /* 158 * We can get non-dirty pages here due to races between 159 * set_page_dirty and truncate_complete_page; just spit out a 160 * warning, in case we end up with accounting problems later. 161 */ 162 if (!PageDirty(page)) 163 pr_err("%p invalidatepage %p page not dirty\n", inode, page); 164 165 ClearPageChecked(page); 166 167 dout("%p invalidatepage %p idx %lu full dirty page\n", 168 inode, page, page->index); 169 170 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 171 ceph_put_snap_context(snapc); 172 page->private = 0; 173 ClearPagePrivate(page); 174 } 175 176 static int ceph_releasepage(struct page *page, gfp_t g) 177 { 178 dout("%p releasepage %p idx %lu\n", page->mapping->host, 179 page, page->index); 180 WARN_ON(PageDirty(page)); 181 182 /* Can we release the page from the cache? */ 183 if (!ceph_release_fscache_page(page, g)) 184 return 0; 185 186 return !PagePrivate(page); 187 } 188 189 /* 190 * read a single page, without unlocking it. 191 */ 192 static int readpage_nounlock(struct file *filp, struct page *page) 193 { 194 struct inode *inode = file_inode(filp); 195 struct ceph_inode_info *ci = ceph_inode(inode); 196 struct ceph_osd_client *osdc = 197 &ceph_inode_to_client(inode)->client->osdc; 198 int err = 0; 199 u64 off = page_offset(page); 200 u64 len = PAGE_SIZE; 201 202 if (off >= i_size_read(inode)) { 203 zero_user_segment(page, 0, PAGE_SIZE); 204 SetPageUptodate(page); 205 return 0; 206 } 207 208 if (ci->i_inline_version != CEPH_INLINE_NONE) { 209 /* 210 * Uptodate inline data should have been added 211 * into page cache while getting Fcr caps. 212 */ 213 if (off == 0) 214 return -EINVAL; 215 zero_user_segment(page, 0, PAGE_SIZE); 216 SetPageUptodate(page); 217 return 0; 218 } 219 220 err = ceph_readpage_from_fscache(inode, page); 221 if (err == 0) 222 goto out; 223 224 dout("readpage inode %p file %p page %p index %lu\n", 225 inode, filp, page, page->index); 226 err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, 227 off, &len, 228 ci->i_truncate_seq, ci->i_truncate_size, 229 &page, 1, 0); 230 if (err == -ENOENT) 231 err = 0; 232 if (err < 0) { 233 SetPageError(page); 234 ceph_fscache_readpage_cancel(inode, page); 235 goto out; 236 } 237 if (err < PAGE_SIZE) 238 /* zero fill remainder of page */ 239 zero_user_segment(page, err, PAGE_SIZE); 240 else 241 flush_dcache_page(page); 242 243 SetPageUptodate(page); 244 ceph_readpage_to_fscache(inode, page); 245 246 out: 247 return err < 0 ? err : 0; 248 } 249 250 static int ceph_readpage(struct file *filp, struct page *page) 251 { 252 int r = readpage_nounlock(filp, page); 253 unlock_page(page); 254 return r; 255 } 256 257 /* 258 * Finish an async read(ahead) op. 259 */ 260 static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) 261 { 262 struct inode *inode = req->r_inode; 263 struct ceph_osd_data *osd_data; 264 int rc = req->r_result; 265 int bytes = le32_to_cpu(msg->hdr.data_len); 266 int num_pages; 267 int i; 268 269 dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); 270 271 /* unlock all pages, zeroing any data we didn't read */ 272 osd_data = osd_req_op_extent_osd_data(req, 0); 273 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 274 num_pages = calc_pages_for((u64)osd_data->alignment, 275 (u64)osd_data->length); 276 for (i = 0; i < num_pages; i++) { 277 struct page *page = osd_data->pages[i]; 278 279 if (rc < 0 && rc != -ENOENT) 280 goto unlock; 281 if (bytes < (int)PAGE_SIZE) { 282 /* zero (remainder of) page */ 283 int s = bytes < 0 ? 0 : bytes; 284 zero_user_segment(page, s, PAGE_SIZE); 285 } 286 dout("finish_read %p uptodate %p idx %lu\n", inode, page, 287 page->index); 288 flush_dcache_page(page); 289 SetPageUptodate(page); 290 ceph_readpage_to_fscache(inode, page); 291 unlock: 292 unlock_page(page); 293 put_page(page); 294 bytes -= PAGE_SIZE; 295 } 296 kfree(osd_data->pages); 297 } 298 299 static void ceph_unlock_page_vector(struct page **pages, int num_pages) 300 { 301 int i; 302 303 for (i = 0; i < num_pages; i++) 304 unlock_page(pages[i]); 305 } 306 307 /* 308 * start an async read(ahead) operation. return nr_pages we submitted 309 * a read for on success, or negative error code. 310 */ 311 static int start_read(struct inode *inode, struct list_head *page_list, int max) 312 { 313 struct ceph_osd_client *osdc = 314 &ceph_inode_to_client(inode)->client->osdc; 315 struct ceph_inode_info *ci = ceph_inode(inode); 316 struct page *page = list_entry(page_list->prev, struct page, lru); 317 struct ceph_vino vino; 318 struct ceph_osd_request *req; 319 u64 off; 320 u64 len; 321 int i; 322 struct page **pages; 323 pgoff_t next_index; 324 int nr_pages = 0; 325 int ret; 326 327 off = (u64) page_offset(page); 328 329 /* count pages */ 330 next_index = page->index; 331 list_for_each_entry_reverse(page, page_list, lru) { 332 if (page->index != next_index) 333 break; 334 nr_pages++; 335 next_index++; 336 if (max && nr_pages == max) 337 break; 338 } 339 len = nr_pages << PAGE_SHIFT; 340 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, 341 off, len); 342 vino = ceph_vino(inode); 343 req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 344 0, 1, CEPH_OSD_OP_READ, 345 CEPH_OSD_FLAG_READ, NULL, 346 ci->i_truncate_seq, ci->i_truncate_size, 347 false); 348 if (IS_ERR(req)) 349 return PTR_ERR(req); 350 351 /* build page vector */ 352 nr_pages = calc_pages_for(0, len); 353 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL); 354 ret = -ENOMEM; 355 if (!pages) 356 goto out; 357 for (i = 0; i < nr_pages; ++i) { 358 page = list_entry(page_list->prev, struct page, lru); 359 BUG_ON(PageLocked(page)); 360 list_del(&page->lru); 361 362 dout("start_read %p adding %p idx %lu\n", inode, page, 363 page->index); 364 if (add_to_page_cache_lru(page, &inode->i_data, page->index, 365 GFP_KERNEL)) { 366 ceph_fscache_uncache_page(inode, page); 367 put_page(page); 368 dout("start_read %p add_to_page_cache failed %p\n", 369 inode, page); 370 nr_pages = i; 371 goto out_pages; 372 } 373 pages[i] = page; 374 } 375 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); 376 req->r_callback = finish_read; 377 req->r_inode = inode; 378 379 ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); 380 381 dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); 382 ret = ceph_osdc_start_request(osdc, req, false); 383 if (ret < 0) 384 goto out_pages; 385 ceph_osdc_put_request(req); 386 return nr_pages; 387 388 out_pages: 389 ceph_unlock_page_vector(pages, nr_pages); 390 ceph_release_page_vector(pages, nr_pages); 391 out: 392 ceph_osdc_put_request(req); 393 return ret; 394 } 395 396 397 /* 398 * Read multiple pages. Leave pages we don't read + unlock in page_list; 399 * the caller (VM) cleans them up. 400 */ 401 static int ceph_readpages(struct file *file, struct address_space *mapping, 402 struct list_head *page_list, unsigned nr_pages) 403 { 404 struct inode *inode = file_inode(file); 405 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 406 int rc = 0; 407 int max = 0; 408 409 if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE) 410 return -EINVAL; 411 412 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, 413 &nr_pages); 414 415 if (rc == 0) 416 goto out; 417 418 if (fsc->mount_options->rsize >= PAGE_SIZE) 419 max = (fsc->mount_options->rsize + PAGE_SIZE - 1) 420 >> PAGE_SHIFT; 421 422 dout("readpages %p file %p nr_pages %d max %d\n", inode, 423 file, nr_pages, 424 max); 425 while (!list_empty(page_list)) { 426 rc = start_read(inode, page_list, max); 427 if (rc < 0) 428 goto out; 429 BUG_ON(rc == 0); 430 } 431 out: 432 ceph_fscache_readpages_cancel(inode, page_list); 433 434 dout("readpages %p file %p ret %d\n", inode, file, rc); 435 return rc; 436 } 437 438 /* 439 * Get ref for the oldest snapc for an inode with dirty data... that is, the 440 * only snap context we are allowed to write back. 441 */ 442 static struct ceph_snap_context *get_oldest_context(struct inode *inode, 443 loff_t *snap_size) 444 { 445 struct ceph_inode_info *ci = ceph_inode(inode); 446 struct ceph_snap_context *snapc = NULL; 447 struct ceph_cap_snap *capsnap = NULL; 448 449 spin_lock(&ci->i_ceph_lock); 450 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 451 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 452 capsnap->context, capsnap->dirty_pages); 453 if (capsnap->dirty_pages) { 454 snapc = ceph_get_snap_context(capsnap->context); 455 if (snap_size) 456 *snap_size = capsnap->size; 457 break; 458 } 459 } 460 if (!snapc && ci->i_wrbuffer_ref_head) { 461 snapc = ceph_get_snap_context(ci->i_head_snapc); 462 dout(" head snapc %p has %d dirty pages\n", 463 snapc, ci->i_wrbuffer_ref_head); 464 } 465 spin_unlock(&ci->i_ceph_lock); 466 return snapc; 467 } 468 469 /* 470 * Write a single page, but leave the page locked. 471 * 472 * If we get a write error, set the page error bit, but still adjust the 473 * dirty page accounting (i.e., page is no longer dirty). 474 */ 475 static int writepage_nounlock(struct page *page, struct writeback_control *wbc) 476 { 477 struct inode *inode; 478 struct ceph_inode_info *ci; 479 struct ceph_fs_client *fsc; 480 struct ceph_osd_client *osdc; 481 struct ceph_snap_context *snapc, *oldest; 482 loff_t page_off = page_offset(page); 483 loff_t snap_size = -1; 484 long writeback_stat; 485 u64 truncate_size; 486 u32 truncate_seq; 487 int err = 0, len = PAGE_SIZE; 488 489 dout("writepage %p idx %lu\n", page, page->index); 490 491 if (!page->mapping || !page->mapping->host) { 492 dout("writepage %p - no mapping\n", page); 493 return -EFAULT; 494 } 495 inode = page->mapping->host; 496 ci = ceph_inode(inode); 497 fsc = ceph_inode_to_client(inode); 498 osdc = &fsc->client->osdc; 499 500 /* verify this is a writeable snap context */ 501 snapc = page_snap_context(page); 502 if (snapc == NULL) { 503 dout("writepage %p page %p not dirty?\n", inode, page); 504 goto out; 505 } 506 oldest = get_oldest_context(inode, &snap_size); 507 if (snapc->seq > oldest->seq) { 508 dout("writepage %p page %p snapc %p not writeable - noop\n", 509 inode, page, snapc); 510 /* we should only noop if called by kswapd */ 511 WARN_ON((current->flags & PF_MEMALLOC) == 0); 512 ceph_put_snap_context(oldest); 513 goto out; 514 } 515 ceph_put_snap_context(oldest); 516 517 spin_lock(&ci->i_ceph_lock); 518 truncate_seq = ci->i_truncate_seq; 519 truncate_size = ci->i_truncate_size; 520 if (snap_size == -1) 521 snap_size = i_size_read(inode); 522 spin_unlock(&ci->i_ceph_lock); 523 524 /* is this a partial page at end of file? */ 525 if (page_off >= snap_size) { 526 dout("%p page eof %llu\n", page, snap_size); 527 goto out; 528 } 529 if (snap_size < page_off + len) 530 len = snap_size - page_off; 531 532 dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", 533 inode, page, page->index, page_off, len, snapc); 534 535 writeback_stat = atomic_long_inc_return(&fsc->writeback_count); 536 if (writeback_stat > 537 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) 538 set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); 539 540 ceph_readpage_to_fscache(inode, page); 541 542 set_page_writeback(page); 543 err = ceph_osdc_writepages(osdc, ceph_vino(inode), 544 &ci->i_layout, snapc, 545 page_off, len, 546 truncate_seq, truncate_size, 547 &inode->i_mtime, &page, 1); 548 if (err < 0) { 549 dout("writepage setting page/mapping error %d %p\n", err, page); 550 SetPageError(page); 551 mapping_set_error(&inode->i_data, err); 552 if (wbc) 553 wbc->pages_skipped++; 554 } else { 555 dout("writepage cleaned page %p\n", page); 556 err = 0; /* vfs expects us to return 0 */ 557 } 558 page->private = 0; 559 ClearPagePrivate(page); 560 end_page_writeback(page); 561 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 562 ceph_put_snap_context(snapc); /* page's reference */ 563 out: 564 return err; 565 } 566 567 static int ceph_writepage(struct page *page, struct writeback_control *wbc) 568 { 569 int err; 570 struct inode *inode = page->mapping->host; 571 BUG_ON(!inode); 572 ihold(inode); 573 err = writepage_nounlock(page, wbc); 574 unlock_page(page); 575 iput(inode); 576 return err; 577 } 578 579 580 /* 581 * lame release_pages helper. release_pages() isn't exported to 582 * modules. 583 */ 584 static void ceph_release_pages(struct page **pages, int num) 585 { 586 struct pagevec pvec; 587 int i; 588 589 pagevec_init(&pvec, 0); 590 for (i = 0; i < num; i++) { 591 if (pagevec_add(&pvec, pages[i]) == 0) 592 pagevec_release(&pvec); 593 } 594 pagevec_release(&pvec); 595 } 596 597 /* 598 * async writeback completion handler. 599 * 600 * If we get an error, set the mapping error bit, but not the individual 601 * page error bits. 602 */ 603 static void writepages_finish(struct ceph_osd_request *req, 604 struct ceph_msg *msg) 605 { 606 struct inode *inode = req->r_inode; 607 struct ceph_inode_info *ci = ceph_inode(inode); 608 struct ceph_osd_data *osd_data; 609 struct page *page; 610 int num_pages, total_pages = 0; 611 int i, j; 612 int rc = req->r_result; 613 struct ceph_snap_context *snapc = req->r_snapc; 614 struct address_space *mapping = inode->i_mapping; 615 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 616 bool remove_page; 617 618 619 dout("writepages_finish %p rc %d\n", inode, rc); 620 if (rc < 0) 621 mapping_set_error(mapping, rc); 622 623 /* 624 * We lost the cache cap, need to truncate the page before 625 * it is unlocked, otherwise we'd truncate it later in the 626 * page truncation thread, possibly losing some data that 627 * raced its way in 628 */ 629 remove_page = !(ceph_caps_issued(ci) & 630 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); 631 632 /* clean all pages */ 633 for (i = 0; i < req->r_num_ops; i++) { 634 if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) 635 break; 636 637 osd_data = osd_req_op_extent_osd_data(req, i); 638 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 639 num_pages = calc_pages_for((u64)osd_data->alignment, 640 (u64)osd_data->length); 641 total_pages += num_pages; 642 for (j = 0; j < num_pages; j++) { 643 page = osd_data->pages[j]; 644 BUG_ON(!page); 645 WARN_ON(!PageUptodate(page)); 646 647 if (atomic_long_dec_return(&fsc->writeback_count) < 648 CONGESTION_OFF_THRESH( 649 fsc->mount_options->congestion_kb)) 650 clear_bdi_congested(&fsc->backing_dev_info, 651 BLK_RW_ASYNC); 652 653 ceph_put_snap_context(page_snap_context(page)); 654 page->private = 0; 655 ClearPagePrivate(page); 656 dout("unlocking %p\n", page); 657 end_page_writeback(page); 658 659 if (remove_page) 660 generic_error_remove_page(inode->i_mapping, 661 page); 662 663 unlock_page(page); 664 } 665 dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n", 666 inode, osd_data->length, rc >= 0 ? num_pages : 0); 667 668 ceph_release_pages(osd_data->pages, num_pages); 669 } 670 671 ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc); 672 673 osd_data = osd_req_op_extent_osd_data(req, 0); 674 if (osd_data->pages_from_pool) 675 mempool_free(osd_data->pages, 676 ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); 677 else 678 kfree(osd_data->pages); 679 ceph_osdc_put_request(req); 680 } 681 682 /* 683 * initiate async writeback 684 */ 685 static int ceph_writepages_start(struct address_space *mapping, 686 struct writeback_control *wbc) 687 { 688 struct inode *inode = mapping->host; 689 struct ceph_inode_info *ci = ceph_inode(inode); 690 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 691 struct ceph_vino vino = ceph_vino(inode); 692 pgoff_t index, start, end; 693 int range_whole = 0; 694 int should_loop = 1; 695 pgoff_t max_pages = 0, max_pages_ever = 0; 696 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; 697 struct pagevec pvec; 698 int done = 0; 699 int rc = 0; 700 unsigned wsize = 1 << inode->i_blkbits; 701 struct ceph_osd_request *req = NULL; 702 int do_sync = 0; 703 loff_t snap_size, i_size; 704 u64 truncate_size; 705 u32 truncate_seq; 706 707 /* 708 * Include a 'sync' in the OSD request if this is a data 709 * integrity write (e.g., O_SYNC write or fsync()), or if our 710 * cap is being revoked. 711 */ 712 if ((wbc->sync_mode == WB_SYNC_ALL) || 713 ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER)) 714 do_sync = 1; 715 dout("writepages_start %p dosync=%d (mode=%s)\n", 716 inode, do_sync, 717 wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 718 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 719 720 if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 721 pr_warn("writepage_start %p on forced umount\n", inode); 722 truncate_pagecache(inode, 0); 723 mapping_set_error(mapping, -EIO); 724 return -EIO; /* we're in a forced umount, don't write! */ 725 } 726 if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) 727 wsize = fsc->mount_options->wsize; 728 if (wsize < PAGE_SIZE) 729 wsize = PAGE_SIZE; 730 max_pages_ever = wsize >> PAGE_SHIFT; 731 732 pagevec_init(&pvec, 0); 733 734 /* where to start/end? */ 735 if (wbc->range_cyclic) { 736 start = mapping->writeback_index; /* Start from prev offset */ 737 end = -1; 738 dout(" cyclic, start at %lu\n", start); 739 } else { 740 start = wbc->range_start >> PAGE_SHIFT; 741 end = wbc->range_end >> PAGE_SHIFT; 742 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 743 range_whole = 1; 744 should_loop = 0; 745 dout(" not cyclic, %lu to %lu\n", start, end); 746 } 747 index = start; 748 749 retry: 750 /* find oldest snap context with dirty data */ 751 ceph_put_snap_context(snapc); 752 snap_size = -1; 753 snapc = get_oldest_context(inode, &snap_size); 754 if (!snapc) { 755 /* hmm, why does writepages get called when there 756 is no dirty data? */ 757 dout(" no snap context with dirty data?\n"); 758 goto out; 759 } 760 dout(" oldest snapc is %p seq %lld (%d snaps)\n", 761 snapc, snapc->seq, snapc->num_snaps); 762 763 spin_lock(&ci->i_ceph_lock); 764 truncate_seq = ci->i_truncate_seq; 765 truncate_size = ci->i_truncate_size; 766 i_size = i_size_read(inode); 767 spin_unlock(&ci->i_ceph_lock); 768 769 if (last_snapc && snapc != last_snapc) { 770 /* if we switched to a newer snapc, restart our scan at the 771 * start of the original file range. */ 772 dout(" snapc differs from last pass, restarting at %lu\n", 773 index); 774 index = start; 775 } 776 last_snapc = snapc; 777 778 while (!done && index <= end) { 779 unsigned i; 780 int first; 781 pgoff_t strip_unit_end = 0; 782 int num_ops = 0, op_idx; 783 int pvec_pages, locked_pages = 0; 784 struct page **pages = NULL, **data_pages; 785 mempool_t *pool = NULL; /* Becomes non-null if mempool used */ 786 struct page *page; 787 int want; 788 u64 offset = 0, len = 0; 789 790 max_pages = max_pages_ever; 791 792 get_more_pages: 793 first = -1; 794 want = min(end - index, 795 min((pgoff_t)PAGEVEC_SIZE, 796 max_pages - (pgoff_t)locked_pages) - 1) 797 + 1; 798 pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index, 799 PAGECACHE_TAG_DIRTY, 800 want); 801 dout("pagevec_lookup_tag got %d\n", pvec_pages); 802 if (!pvec_pages && !locked_pages) 803 break; 804 for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { 805 page = pvec.pages[i]; 806 dout("? %p idx %lu\n", page, page->index); 807 if (locked_pages == 0) 808 lock_page(page); /* first page */ 809 else if (!trylock_page(page)) 810 break; 811 812 /* only dirty pages, or our accounting breaks */ 813 if (unlikely(!PageDirty(page)) || 814 unlikely(page->mapping != mapping)) { 815 dout("!dirty or !mapping %p\n", page); 816 unlock_page(page); 817 break; 818 } 819 if (!wbc->range_cyclic && page->index > end) { 820 dout("end of range %p\n", page); 821 done = 1; 822 unlock_page(page); 823 break; 824 } 825 if (strip_unit_end && (page->index > strip_unit_end)) { 826 dout("end of strip unit %p\n", page); 827 unlock_page(page); 828 break; 829 } 830 if (wbc->sync_mode != WB_SYNC_NONE) { 831 dout("waiting on writeback %p\n", page); 832 wait_on_page_writeback(page); 833 } 834 if (page_offset(page) >= 835 (snap_size == -1 ? i_size : snap_size)) { 836 dout("%p page eof %llu\n", page, 837 (snap_size == -1 ? i_size : snap_size)); 838 done = 1; 839 unlock_page(page); 840 break; 841 } 842 if (PageWriteback(page)) { 843 dout("%p under writeback\n", page); 844 unlock_page(page); 845 break; 846 } 847 848 /* only if matching snap context */ 849 pgsnapc = page_snap_context(page); 850 if (pgsnapc->seq > snapc->seq) { 851 dout("page snapc %p %lld > oldest %p %lld\n", 852 pgsnapc, pgsnapc->seq, snapc, snapc->seq); 853 unlock_page(page); 854 if (!locked_pages) 855 continue; /* keep looking for snap */ 856 break; 857 } 858 859 if (!clear_page_dirty_for_io(page)) { 860 dout("%p !clear_page_dirty_for_io\n", page); 861 unlock_page(page); 862 break; 863 } 864 865 /* 866 * We have something to write. If this is 867 * the first locked page this time through, 868 * calculate max possinle write size and 869 * allocate a page array 870 */ 871 if (locked_pages == 0) { 872 u64 objnum; 873 u64 objoff; 874 875 /* prepare async write request */ 876 offset = (u64)page_offset(page); 877 len = wsize; 878 879 rc = ceph_calc_file_object_mapping(&ci->i_layout, 880 offset, len, 881 &objnum, &objoff, 882 &len); 883 if (rc < 0) { 884 unlock_page(page); 885 break; 886 } 887 888 num_ops = 1 + do_sync; 889 strip_unit_end = page->index + 890 ((len - 1) >> PAGE_SHIFT); 891 892 BUG_ON(pages); 893 max_pages = calc_pages_for(0, (u64)len); 894 pages = kmalloc(max_pages * sizeof (*pages), 895 GFP_NOFS); 896 if (!pages) { 897 pool = fsc->wb_pagevec_pool; 898 pages = mempool_alloc(pool, GFP_NOFS); 899 BUG_ON(!pages); 900 } 901 902 len = 0; 903 } else if (page->index != 904 (offset + len) >> PAGE_SHIFT) { 905 if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS : 906 CEPH_OSD_MAX_OPS)) { 907 redirty_page_for_writepage(wbc, page); 908 unlock_page(page); 909 break; 910 } 911 912 num_ops++; 913 offset = (u64)page_offset(page); 914 len = 0; 915 } 916 917 /* note position of first page in pvec */ 918 if (first < 0) 919 first = i; 920 dout("%p will write page %p idx %lu\n", 921 inode, page, page->index); 922 923 if (atomic_long_inc_return(&fsc->writeback_count) > 924 CONGESTION_ON_THRESH( 925 fsc->mount_options->congestion_kb)) { 926 set_bdi_congested(&fsc->backing_dev_info, 927 BLK_RW_ASYNC); 928 } 929 930 pages[locked_pages] = page; 931 locked_pages++; 932 len += PAGE_SIZE; 933 } 934 935 /* did we get anything? */ 936 if (!locked_pages) 937 goto release_pvec_pages; 938 if (i) { 939 int j; 940 BUG_ON(!locked_pages || first < 0); 941 942 if (pvec_pages && i == pvec_pages && 943 locked_pages < max_pages) { 944 dout("reached end pvec, trying for more\n"); 945 pagevec_reinit(&pvec); 946 goto get_more_pages; 947 } 948 949 /* shift unused pages over in the pvec... we 950 * will need to release them below. */ 951 for (j = i; j < pvec_pages; j++) { 952 dout(" pvec leftover page %p\n", pvec.pages[j]); 953 pvec.pages[j-i+first] = pvec.pages[j]; 954 } 955 pvec.nr -= i-first; 956 } 957 958 new_request: 959 offset = page_offset(pages[0]); 960 len = wsize; 961 962 req = ceph_osdc_new_request(&fsc->client->osdc, 963 &ci->i_layout, vino, 964 offset, &len, 0, num_ops, 965 CEPH_OSD_OP_WRITE, 966 CEPH_OSD_FLAG_WRITE | 967 CEPH_OSD_FLAG_ONDISK, 968 snapc, truncate_seq, 969 truncate_size, false); 970 if (IS_ERR(req)) { 971 req = ceph_osdc_new_request(&fsc->client->osdc, 972 &ci->i_layout, vino, 973 offset, &len, 0, 974 min(num_ops, 975 CEPH_OSD_SLAB_OPS), 976 CEPH_OSD_OP_WRITE, 977 CEPH_OSD_FLAG_WRITE | 978 CEPH_OSD_FLAG_ONDISK, 979 snapc, truncate_seq, 980 truncate_size, true); 981 BUG_ON(IS_ERR(req)); 982 } 983 BUG_ON(len < page_offset(pages[locked_pages - 1]) + 984 PAGE_SIZE - offset); 985 986 req->r_callback = writepages_finish; 987 req->r_inode = inode; 988 989 /* Format the osd request message and submit the write */ 990 len = 0; 991 data_pages = pages; 992 op_idx = 0; 993 for (i = 0; i < locked_pages; i++) { 994 u64 cur_offset = page_offset(pages[i]); 995 if (offset + len != cur_offset) { 996 if (op_idx + do_sync + 1 == req->r_num_ops) 997 break; 998 osd_req_op_extent_dup_last(req, op_idx, 999 cur_offset - offset); 1000 dout("writepages got pages at %llu~%llu\n", 1001 offset, len); 1002 osd_req_op_extent_osd_data_pages(req, op_idx, 1003 data_pages, len, 0, 1004 !!pool, false); 1005 osd_req_op_extent_update(req, op_idx, len); 1006 1007 len = 0; 1008 offset = cur_offset; 1009 data_pages = pages + i; 1010 op_idx++; 1011 } 1012 1013 set_page_writeback(pages[i]); 1014 len += PAGE_SIZE; 1015 } 1016 1017 if (snap_size != -1) { 1018 len = min(len, snap_size - offset); 1019 } else if (i == locked_pages) { 1020 /* writepages_finish() clears writeback pages 1021 * according to the data length, so make sure 1022 * data length covers all locked pages */ 1023 u64 min_len = len + 1 - PAGE_SIZE; 1024 len = min(len, (u64)i_size_read(inode) - offset); 1025 len = max(len, min_len); 1026 } 1027 dout("writepages got pages at %llu~%llu\n", offset, len); 1028 1029 osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 1030 0, !!pool, false); 1031 osd_req_op_extent_update(req, op_idx, len); 1032 1033 if (do_sync) { 1034 op_idx++; 1035 osd_req_op_init(req, op_idx, CEPH_OSD_OP_STARTSYNC, 0); 1036 } 1037 BUG_ON(op_idx + 1 != req->r_num_ops); 1038 1039 pool = NULL; 1040 if (i < locked_pages) { 1041 BUG_ON(num_ops <= req->r_num_ops); 1042 num_ops -= req->r_num_ops; 1043 num_ops += do_sync; 1044 locked_pages -= i; 1045 1046 /* allocate new pages array for next request */ 1047 data_pages = pages; 1048 pages = kmalloc(locked_pages * sizeof (*pages), 1049 GFP_NOFS); 1050 if (!pages) { 1051 pool = fsc->wb_pagevec_pool; 1052 pages = mempool_alloc(pool, GFP_NOFS); 1053 BUG_ON(!pages); 1054 } 1055 memcpy(pages, data_pages + i, 1056 locked_pages * sizeof(*pages)); 1057 memset(data_pages + i, 0, 1058 locked_pages * sizeof(*pages)); 1059 } else { 1060 BUG_ON(num_ops != req->r_num_ops); 1061 index = pages[i - 1]->index + 1; 1062 /* request message now owns the pages array */ 1063 pages = NULL; 1064 } 1065 1066 vino = ceph_vino(inode); 1067 ceph_osdc_build_request(req, offset, snapc, vino.snap, 1068 &inode->i_mtime); 1069 1070 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 1071 BUG_ON(rc); 1072 req = NULL; 1073 1074 wbc->nr_to_write -= i; 1075 if (pages) 1076 goto new_request; 1077 1078 if (wbc->nr_to_write <= 0) 1079 done = 1; 1080 1081 release_pvec_pages: 1082 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, 1083 pvec.nr ? pvec.pages[0] : NULL); 1084 pagevec_release(&pvec); 1085 1086 if (locked_pages && !done) 1087 goto retry; 1088 } 1089 1090 if (should_loop && !done) { 1091 /* more to do; loop back to beginning of file */ 1092 dout("writepages looping back to beginning of file\n"); 1093 should_loop = 0; 1094 index = 0; 1095 goto retry; 1096 } 1097 1098 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 1099 mapping->writeback_index = index; 1100 1101 out: 1102 if (req) 1103 ceph_osdc_put_request(req); 1104 ceph_put_snap_context(snapc); 1105 dout("writepages done, rc = %d\n", rc); 1106 return rc; 1107 } 1108 1109 1110 1111 /* 1112 * See if a given @snapc is either writeable, or already written. 1113 */ 1114 static int context_is_writeable_or_written(struct inode *inode, 1115 struct ceph_snap_context *snapc) 1116 { 1117 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); 1118 int ret = !oldest || snapc->seq <= oldest->seq; 1119 1120 ceph_put_snap_context(oldest); 1121 return ret; 1122 } 1123 1124 /* 1125 * We are only allowed to write into/dirty the page if the page is 1126 * clean, or already dirty within the same snap context. 1127 * 1128 * called with page locked. 1129 * return success with page locked, 1130 * or any failure (incl -EAGAIN) with page unlocked. 1131 */ 1132 static int ceph_update_writeable_page(struct file *file, 1133 loff_t pos, unsigned len, 1134 struct page *page) 1135 { 1136 struct inode *inode = file_inode(file); 1137 struct ceph_inode_info *ci = ceph_inode(inode); 1138 loff_t page_off = pos & PAGE_MASK; 1139 int pos_in_page = pos & ~PAGE_MASK; 1140 int end_in_page = pos_in_page + len; 1141 loff_t i_size; 1142 int r; 1143 struct ceph_snap_context *snapc, *oldest; 1144 1145 retry_locked: 1146 /* writepages currently holds page lock, but if we change that later, */ 1147 wait_on_page_writeback(page); 1148 1149 snapc = page_snap_context(page); 1150 if (snapc && snapc != ci->i_head_snapc) { 1151 /* 1152 * this page is already dirty in another (older) snap 1153 * context! is it writeable now? 1154 */ 1155 oldest = get_oldest_context(inode, NULL); 1156 1157 if (snapc->seq > oldest->seq) { 1158 ceph_put_snap_context(oldest); 1159 dout(" page %p snapc %p not current or oldest\n", 1160 page, snapc); 1161 /* 1162 * queue for writeback, and wait for snapc to 1163 * be writeable or written 1164 */ 1165 snapc = ceph_get_snap_context(snapc); 1166 unlock_page(page); 1167 ceph_queue_writeback(inode); 1168 r = wait_event_interruptible(ci->i_cap_wq, 1169 context_is_writeable_or_written(inode, snapc)); 1170 ceph_put_snap_context(snapc); 1171 if (r == -ERESTARTSYS) 1172 return r; 1173 return -EAGAIN; 1174 } 1175 ceph_put_snap_context(oldest); 1176 1177 /* yay, writeable, do it now (without dropping page lock) */ 1178 dout(" page %p snapc %p not current, but oldest\n", 1179 page, snapc); 1180 if (!clear_page_dirty_for_io(page)) 1181 goto retry_locked; 1182 r = writepage_nounlock(page, NULL); 1183 if (r < 0) 1184 goto fail_nosnap; 1185 goto retry_locked; 1186 } 1187 1188 if (PageUptodate(page)) { 1189 dout(" page %p already uptodate\n", page); 1190 return 0; 1191 } 1192 1193 /* full page? */ 1194 if (pos_in_page == 0 && len == PAGE_SIZE) 1195 return 0; 1196 1197 /* past end of file? */ 1198 i_size = i_size_read(inode); 1199 1200 if (page_off >= i_size || 1201 (pos_in_page == 0 && (pos+len) >= i_size && 1202 end_in_page - pos_in_page != PAGE_SIZE)) { 1203 dout(" zeroing %p 0 - %d and %d - %d\n", 1204 page, pos_in_page, end_in_page, (int)PAGE_SIZE); 1205 zero_user_segments(page, 1206 0, pos_in_page, 1207 end_in_page, PAGE_SIZE); 1208 return 0; 1209 } 1210 1211 /* we need to read it. */ 1212 r = readpage_nounlock(file, page); 1213 if (r < 0) 1214 goto fail_nosnap; 1215 goto retry_locked; 1216 fail_nosnap: 1217 unlock_page(page); 1218 return r; 1219 } 1220 1221 /* 1222 * We are only allowed to write into/dirty the page if the page is 1223 * clean, or already dirty within the same snap context. 1224 */ 1225 static int ceph_write_begin(struct file *file, struct address_space *mapping, 1226 loff_t pos, unsigned len, unsigned flags, 1227 struct page **pagep, void **fsdata) 1228 { 1229 struct inode *inode = file_inode(file); 1230 struct page *page; 1231 pgoff_t index = pos >> PAGE_SHIFT; 1232 int r; 1233 1234 do { 1235 /* get a page */ 1236 page = grab_cache_page_write_begin(mapping, index, 0); 1237 if (!page) 1238 return -ENOMEM; 1239 1240 dout("write_begin file %p inode %p page %p %d~%d\n", file, 1241 inode, page, (int)pos, (int)len); 1242 1243 r = ceph_update_writeable_page(file, pos, len, page); 1244 if (r < 0) 1245 put_page(page); 1246 else 1247 *pagep = page; 1248 } while (r == -EAGAIN); 1249 1250 return r; 1251 } 1252 1253 /* 1254 * we don't do anything in here that simple_write_end doesn't do 1255 * except adjust dirty page accounting 1256 */ 1257 static int ceph_write_end(struct file *file, struct address_space *mapping, 1258 loff_t pos, unsigned len, unsigned copied, 1259 struct page *page, void *fsdata) 1260 { 1261 struct inode *inode = file_inode(file); 1262 unsigned from = pos & (PAGE_SIZE - 1); 1263 int check_cap = 0; 1264 1265 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, 1266 inode, page, (int)pos, (int)copied, (int)len); 1267 1268 /* zero the stale part of the page if we did a short copy */ 1269 if (copied < len) 1270 zero_user_segment(page, from+copied, len); 1271 1272 /* did file size increase? */ 1273 if (pos+copied > i_size_read(inode)) 1274 check_cap = ceph_inode_set_size(inode, pos+copied); 1275 1276 if (!PageUptodate(page)) 1277 SetPageUptodate(page); 1278 1279 set_page_dirty(page); 1280 1281 unlock_page(page); 1282 put_page(page); 1283 1284 if (check_cap) 1285 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 1286 1287 return copied; 1288 } 1289 1290 /* 1291 * we set .direct_IO to indicate direct io is supported, but since we 1292 * intercept O_DIRECT reads and writes early, this function should 1293 * never get called. 1294 */ 1295 static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter, 1296 loff_t pos) 1297 { 1298 WARN_ON(1); 1299 return -EINVAL; 1300 } 1301 1302 const struct address_space_operations ceph_aops = { 1303 .readpage = ceph_readpage, 1304 .readpages = ceph_readpages, 1305 .writepage = ceph_writepage, 1306 .writepages = ceph_writepages_start, 1307 .write_begin = ceph_write_begin, 1308 .write_end = ceph_write_end, 1309 .set_page_dirty = ceph_set_page_dirty, 1310 .invalidatepage = ceph_invalidatepage, 1311 .releasepage = ceph_releasepage, 1312 .direct_IO = ceph_direct_io, 1313 }; 1314 1315 1316 /* 1317 * vm ops 1318 */ 1319 static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1320 { 1321 struct inode *inode = file_inode(vma->vm_file); 1322 struct ceph_inode_info *ci = ceph_inode(inode); 1323 struct ceph_file_info *fi = vma->vm_file->private_data; 1324 struct page *pinned_page = NULL; 1325 loff_t off = vmf->pgoff << PAGE_SHIFT; 1326 int want, got, ret; 1327 1328 dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", 1329 inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE); 1330 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1331 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1332 else 1333 want = CEPH_CAP_FILE_CACHE; 1334 while (1) { 1335 got = 0; 1336 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, 1337 -1, &got, &pinned_page); 1338 if (ret == 0) 1339 break; 1340 if (ret != -ERESTARTSYS) { 1341 WARN_ON(1); 1342 return VM_FAULT_SIGBUS; 1343 } 1344 } 1345 dout("filemap_fault %p %llu~%zd got cap refs on %s\n", 1346 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got)); 1347 1348 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 1349 ci->i_inline_version == CEPH_INLINE_NONE) 1350 ret = filemap_fault(vma, vmf); 1351 else 1352 ret = -EAGAIN; 1353 1354 dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n", 1355 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret); 1356 if (pinned_page) 1357 put_page(pinned_page); 1358 ceph_put_cap_refs(ci, got); 1359 1360 if (ret != -EAGAIN) 1361 return ret; 1362 1363 /* read inline data */ 1364 if (off >= PAGE_SIZE) { 1365 /* does not support inline data > PAGE_SIZE */ 1366 ret = VM_FAULT_SIGBUS; 1367 } else { 1368 int ret1; 1369 struct address_space *mapping = inode->i_mapping; 1370 struct page *page = find_or_create_page(mapping, 0, 1371 mapping_gfp_constraint(mapping, 1372 ~__GFP_FS)); 1373 if (!page) { 1374 ret = VM_FAULT_OOM; 1375 goto out; 1376 } 1377 ret1 = __ceph_do_getattr(inode, page, 1378 CEPH_STAT_CAP_INLINE_DATA, true); 1379 if (ret1 < 0 || off >= i_size_read(inode)) { 1380 unlock_page(page); 1381 put_page(page); 1382 ret = VM_FAULT_SIGBUS; 1383 goto out; 1384 } 1385 if (ret1 < PAGE_SIZE) 1386 zero_user_segment(page, ret1, PAGE_SIZE); 1387 else 1388 flush_dcache_page(page); 1389 SetPageUptodate(page); 1390 vmf->page = page; 1391 ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED; 1392 } 1393 out: 1394 dout("filemap_fault %p %llu~%zd read inline data ret %d\n", 1395 inode, off, (size_t)PAGE_SIZE, ret); 1396 return ret; 1397 } 1398 1399 /* 1400 * Reuse write_begin here for simplicity. 1401 */ 1402 static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 1403 { 1404 struct inode *inode = file_inode(vma->vm_file); 1405 struct ceph_inode_info *ci = ceph_inode(inode); 1406 struct ceph_file_info *fi = vma->vm_file->private_data; 1407 struct ceph_cap_flush *prealloc_cf; 1408 struct page *page = vmf->page; 1409 loff_t off = page_offset(page); 1410 loff_t size = i_size_read(inode); 1411 size_t len; 1412 int want, got, ret; 1413 1414 prealloc_cf = ceph_alloc_cap_flush(); 1415 if (!prealloc_cf) 1416 return VM_FAULT_SIGBUS; 1417 1418 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1419 struct page *locked_page = NULL; 1420 if (off == 0) { 1421 lock_page(page); 1422 locked_page = page; 1423 } 1424 ret = ceph_uninline_data(vma->vm_file, locked_page); 1425 if (locked_page) 1426 unlock_page(locked_page); 1427 if (ret < 0) { 1428 ret = VM_FAULT_SIGBUS; 1429 goto out_free; 1430 } 1431 } 1432 1433 if (off + PAGE_SIZE <= size) 1434 len = PAGE_SIZE; 1435 else 1436 len = size & ~PAGE_MASK; 1437 1438 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 1439 inode, ceph_vinop(inode), off, len, size); 1440 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1441 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1442 else 1443 want = CEPH_CAP_FILE_BUFFER; 1444 while (1) { 1445 got = 0; 1446 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len, 1447 &got, NULL); 1448 if (ret == 0) 1449 break; 1450 if (ret != -ERESTARTSYS) { 1451 WARN_ON(1); 1452 ret = VM_FAULT_SIGBUS; 1453 goto out_free; 1454 } 1455 } 1456 dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", 1457 inode, off, len, ceph_cap_string(got)); 1458 1459 /* Update time before taking page lock */ 1460 file_update_time(vma->vm_file); 1461 1462 lock_page(page); 1463 1464 ret = VM_FAULT_NOPAGE; 1465 if ((off > size) || 1466 (page->mapping != inode->i_mapping)) { 1467 unlock_page(page); 1468 goto out; 1469 } 1470 1471 ret = ceph_update_writeable_page(vma->vm_file, off, len, page); 1472 if (ret >= 0) { 1473 /* success. we'll keep the page locked. */ 1474 set_page_dirty(page); 1475 ret = VM_FAULT_LOCKED; 1476 } else { 1477 if (ret == -ENOMEM) 1478 ret = VM_FAULT_OOM; 1479 else 1480 ret = VM_FAULT_SIGBUS; 1481 } 1482 out: 1483 if (ret == VM_FAULT_LOCKED || 1484 ci->i_inline_version != CEPH_INLINE_NONE) { 1485 int dirty; 1486 spin_lock(&ci->i_ceph_lock); 1487 ci->i_inline_version = CEPH_INLINE_NONE; 1488 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1489 &prealloc_cf); 1490 spin_unlock(&ci->i_ceph_lock); 1491 if (dirty) 1492 __mark_inode_dirty(inode, dirty); 1493 } 1494 1495 dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n", 1496 inode, off, len, ceph_cap_string(got), ret); 1497 ceph_put_cap_refs(ci, got); 1498 out_free: 1499 ceph_free_cap_flush(prealloc_cf); 1500 1501 return ret; 1502 } 1503 1504 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, 1505 char *data, size_t len) 1506 { 1507 struct address_space *mapping = inode->i_mapping; 1508 struct page *page; 1509 1510 if (locked_page) { 1511 page = locked_page; 1512 } else { 1513 if (i_size_read(inode) == 0) 1514 return; 1515 page = find_or_create_page(mapping, 0, 1516 mapping_gfp_constraint(mapping, 1517 ~__GFP_FS)); 1518 if (!page) 1519 return; 1520 if (PageUptodate(page)) { 1521 unlock_page(page); 1522 put_page(page); 1523 return; 1524 } 1525 } 1526 1527 dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 1528 inode, ceph_vinop(inode), len, locked_page); 1529 1530 if (len > 0) { 1531 void *kaddr = kmap_atomic(page); 1532 memcpy(kaddr, data, len); 1533 kunmap_atomic(kaddr); 1534 } 1535 1536 if (page != locked_page) { 1537 if (len < PAGE_SIZE) 1538 zero_user_segment(page, len, PAGE_SIZE); 1539 else 1540 flush_dcache_page(page); 1541 1542 SetPageUptodate(page); 1543 unlock_page(page); 1544 put_page(page); 1545 } 1546 } 1547 1548 int ceph_uninline_data(struct file *filp, struct page *locked_page) 1549 { 1550 struct inode *inode = file_inode(filp); 1551 struct ceph_inode_info *ci = ceph_inode(inode); 1552 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1553 struct ceph_osd_request *req; 1554 struct page *page = NULL; 1555 u64 len, inline_version; 1556 int err = 0; 1557 bool from_pagecache = false; 1558 1559 spin_lock(&ci->i_ceph_lock); 1560 inline_version = ci->i_inline_version; 1561 spin_unlock(&ci->i_ceph_lock); 1562 1563 dout("uninline_data %p %llx.%llx inline_version %llu\n", 1564 inode, ceph_vinop(inode), inline_version); 1565 1566 if (inline_version == 1 || /* initial version, no data */ 1567 inline_version == CEPH_INLINE_NONE) 1568 goto out; 1569 1570 if (locked_page) { 1571 page = locked_page; 1572 WARN_ON(!PageUptodate(page)); 1573 } else if (ceph_caps_issued(ci) & 1574 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) { 1575 page = find_get_page(inode->i_mapping, 0); 1576 if (page) { 1577 if (PageUptodate(page)) { 1578 from_pagecache = true; 1579 lock_page(page); 1580 } else { 1581 put_page(page); 1582 page = NULL; 1583 } 1584 } 1585 } 1586 1587 if (page) { 1588 len = i_size_read(inode); 1589 if (len > PAGE_SIZE) 1590 len = PAGE_SIZE; 1591 } else { 1592 page = __page_cache_alloc(GFP_NOFS); 1593 if (!page) { 1594 err = -ENOMEM; 1595 goto out; 1596 } 1597 err = __ceph_do_getattr(inode, page, 1598 CEPH_STAT_CAP_INLINE_DATA, true); 1599 if (err < 0) { 1600 /* no inline data */ 1601 if (err == -ENODATA) 1602 err = 0; 1603 goto out; 1604 } 1605 len = err; 1606 } 1607 1608 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1609 ceph_vino(inode), 0, &len, 0, 1, 1610 CEPH_OSD_OP_CREATE, 1611 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, 1612 NULL, 0, 0, false); 1613 if (IS_ERR(req)) { 1614 err = PTR_ERR(req); 1615 goto out; 1616 } 1617 1618 ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime); 1619 err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1620 if (!err) 1621 err = ceph_osdc_wait_request(&fsc->client->osdc, req); 1622 ceph_osdc_put_request(req); 1623 if (err < 0) 1624 goto out; 1625 1626 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1627 ceph_vino(inode), 0, &len, 1, 3, 1628 CEPH_OSD_OP_WRITE, 1629 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, 1630 NULL, ci->i_truncate_seq, 1631 ci->i_truncate_size, false); 1632 if (IS_ERR(req)) { 1633 err = PTR_ERR(req); 1634 goto out; 1635 } 1636 1637 osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false); 1638 1639 { 1640 __le64 xattr_buf = cpu_to_le64(inline_version); 1641 err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR, 1642 "inline_version", &xattr_buf, 1643 sizeof(xattr_buf), 1644 CEPH_OSD_CMPXATTR_OP_GT, 1645 CEPH_OSD_CMPXATTR_MODE_U64); 1646 if (err) 1647 goto out_put; 1648 } 1649 1650 { 1651 char xattr_buf[32]; 1652 int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf), 1653 "%llu", inline_version); 1654 err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR, 1655 "inline_version", 1656 xattr_buf, xattr_len, 0, 0); 1657 if (err) 1658 goto out_put; 1659 } 1660 1661 ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime); 1662 err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1663 if (!err) 1664 err = ceph_osdc_wait_request(&fsc->client->osdc, req); 1665 out_put: 1666 ceph_osdc_put_request(req); 1667 if (err == -ECANCELED) 1668 err = 0; 1669 out: 1670 if (page && page != locked_page) { 1671 if (from_pagecache) { 1672 unlock_page(page); 1673 put_page(page); 1674 } else 1675 __free_pages(page, 0); 1676 } 1677 1678 dout("uninline_data %p %llx.%llx inline_version %llu = %d\n", 1679 inode, ceph_vinop(inode), inline_version, err); 1680 return err; 1681 } 1682 1683 static const struct vm_operations_struct ceph_vmops = { 1684 .fault = ceph_filemap_fault, 1685 .page_mkwrite = ceph_page_mkwrite, 1686 }; 1687 1688 int ceph_mmap(struct file *file, struct vm_area_struct *vma) 1689 { 1690 struct address_space *mapping = file->f_mapping; 1691 1692 if (!mapping->a_ops->readpage) 1693 return -ENOEXEC; 1694 file_accessed(file); 1695 vma->vm_ops = &ceph_vmops; 1696 return 0; 1697 } 1698 1699 enum { 1700 POOL_READ = 1, 1701 POOL_WRITE = 2, 1702 }; 1703 1704 static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool) 1705 { 1706 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); 1707 struct ceph_mds_client *mdsc = fsc->mdsc; 1708 struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; 1709 struct rb_node **p, *parent; 1710 struct ceph_pool_perm *perm; 1711 struct page **pages; 1712 int err = 0, err2 = 0, have = 0; 1713 1714 down_read(&mdsc->pool_perm_rwsem); 1715 p = &mdsc->pool_perm_tree.rb_node; 1716 while (*p) { 1717 perm = rb_entry(*p, struct ceph_pool_perm, node); 1718 if (pool < perm->pool) 1719 p = &(*p)->rb_left; 1720 else if (pool > perm->pool) 1721 p = &(*p)->rb_right; 1722 else { 1723 have = perm->perm; 1724 break; 1725 } 1726 } 1727 up_read(&mdsc->pool_perm_rwsem); 1728 if (*p) 1729 goto out; 1730 1731 dout("__ceph_pool_perm_get pool %u no perm cached\n", pool); 1732 1733 down_write(&mdsc->pool_perm_rwsem); 1734 parent = NULL; 1735 while (*p) { 1736 parent = *p; 1737 perm = rb_entry(parent, struct ceph_pool_perm, node); 1738 if (pool < perm->pool) 1739 p = &(*p)->rb_left; 1740 else if (pool > perm->pool) 1741 p = &(*p)->rb_right; 1742 else { 1743 have = perm->perm; 1744 break; 1745 } 1746 } 1747 if (*p) { 1748 up_write(&mdsc->pool_perm_rwsem); 1749 goto out; 1750 } 1751 1752 rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1753 1, false, GFP_NOFS); 1754 if (!rd_req) { 1755 err = -ENOMEM; 1756 goto out_unlock; 1757 } 1758 1759 rd_req->r_flags = CEPH_OSD_FLAG_READ; 1760 osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0); 1761 rd_req->r_base_oloc.pool = pool; 1762 snprintf(rd_req->r_base_oid.name, sizeof(rd_req->r_base_oid.name), 1763 "%llx.00000000", ci->i_vino.ino); 1764 rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name); 1765 1766 wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1767 1, false, GFP_NOFS); 1768 if (!wr_req) { 1769 err = -ENOMEM; 1770 goto out_unlock; 1771 } 1772 1773 wr_req->r_flags = CEPH_OSD_FLAG_WRITE | 1774 CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK; 1775 osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL); 1776 wr_req->r_base_oloc.pool = pool; 1777 wr_req->r_base_oid = rd_req->r_base_oid; 1778 1779 /* one page should be large enough for STAT data */ 1780 pages = ceph_alloc_page_vector(1, GFP_KERNEL); 1781 if (IS_ERR(pages)) { 1782 err = PTR_ERR(pages); 1783 goto out_unlock; 1784 } 1785 1786 osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE, 1787 0, false, true); 1788 ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP, 1789 &ci->vfs_inode.i_mtime); 1790 err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); 1791 1792 ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP, 1793 &ci->vfs_inode.i_mtime); 1794 err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); 1795 1796 if (!err) 1797 err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req); 1798 if (!err2) 1799 err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req); 1800 1801 if (err >= 0 || err == -ENOENT) 1802 have |= POOL_READ; 1803 else if (err != -EPERM) 1804 goto out_unlock; 1805 1806 if (err2 == 0 || err2 == -EEXIST) 1807 have |= POOL_WRITE; 1808 else if (err2 != -EPERM) { 1809 err = err2; 1810 goto out_unlock; 1811 } 1812 1813 perm = kmalloc(sizeof(*perm), GFP_NOFS); 1814 if (!perm) { 1815 err = -ENOMEM; 1816 goto out_unlock; 1817 } 1818 1819 perm->pool = pool; 1820 perm->perm = have; 1821 rb_link_node(&perm->node, parent, p); 1822 rb_insert_color(&perm->node, &mdsc->pool_perm_tree); 1823 err = 0; 1824 out_unlock: 1825 up_write(&mdsc->pool_perm_rwsem); 1826 1827 if (rd_req) 1828 ceph_osdc_put_request(rd_req); 1829 if (wr_req) 1830 ceph_osdc_put_request(wr_req); 1831 out: 1832 if (!err) 1833 err = have; 1834 dout("__ceph_pool_perm_get pool %u result = %d\n", pool, err); 1835 return err; 1836 } 1837 1838 int ceph_pool_perm_check(struct ceph_inode_info *ci, int need) 1839 { 1840 u32 pool; 1841 int ret, flags; 1842 1843 /* does not support pool namespace yet */ 1844 if (ci->i_pool_ns_len) 1845 return -EIO; 1846 1847 if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode), 1848 NOPOOLPERM)) 1849 return 0; 1850 1851 spin_lock(&ci->i_ceph_lock); 1852 flags = ci->i_ceph_flags; 1853 pool = ceph_file_layout_pg_pool(ci->i_layout); 1854 spin_unlock(&ci->i_ceph_lock); 1855 check: 1856 if (flags & CEPH_I_POOL_PERM) { 1857 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) { 1858 dout("ceph_pool_perm_check pool %u no read perm\n", 1859 pool); 1860 return -EPERM; 1861 } 1862 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) { 1863 dout("ceph_pool_perm_check pool %u no write perm\n", 1864 pool); 1865 return -EPERM; 1866 } 1867 return 0; 1868 } 1869 1870 ret = __ceph_pool_perm_get(ci, pool); 1871 if (ret < 0) 1872 return ret; 1873 1874 flags = CEPH_I_POOL_PERM; 1875 if (ret & POOL_READ) 1876 flags |= CEPH_I_POOL_RD; 1877 if (ret & POOL_WRITE) 1878 flags |= CEPH_I_POOL_WR; 1879 1880 spin_lock(&ci->i_ceph_lock); 1881 if (pool == ceph_file_layout_pg_pool(ci->i_layout)) { 1882 ci->i_ceph_flags = flags; 1883 } else { 1884 pool = ceph_file_layout_pg_pool(ci->i_layout); 1885 flags = ci->i_ceph_flags; 1886 } 1887 spin_unlock(&ci->i_ceph_lock); 1888 goto check; 1889 } 1890 1891 void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc) 1892 { 1893 struct ceph_pool_perm *perm; 1894 struct rb_node *n; 1895 1896 while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) { 1897 n = rb_first(&mdsc->pool_perm_tree); 1898 perm = rb_entry(n, struct ceph_pool_perm, node); 1899 rb_erase(n, &mdsc->pool_perm_tree); 1900 kfree(perm); 1901 } 1902 } 1903