1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/backing-dev.h> 5 #include <linux/fs.h> 6 #include <linux/mm.h> 7 #include <linux/pagemap.h> 8 #include <linux/writeback.h> /* generic_writepages */ 9 #include <linux/slab.h> 10 #include <linux/pagevec.h> 11 #include <linux/task_io_accounting_ops.h> 12 #include <linux/signal.h> 13 #include <linux/iversion.h> 14 15 #include "super.h" 16 #include "mds_client.h" 17 #include "cache.h" 18 #include <linux/ceph/osd_client.h> 19 #include <linux/ceph/striper.h> 20 21 /* 22 * Ceph address space ops. 23 * 24 * There are a few funny things going on here. 25 * 26 * The page->private field is used to reference a struct 27 * ceph_snap_context for _every_ dirty page. This indicates which 28 * snapshot the page was logically dirtied in, and thus which snap 29 * context needs to be associated with the osd write during writeback. 30 * 31 * Similarly, struct ceph_inode_info maintains a set of counters to 32 * count dirty pages on the inode. In the absence of snapshots, 33 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. 34 * 35 * When a snapshot is taken (that is, when the client receives 36 * notification that a snapshot was taken), each inode with caps and 37 * with dirty pages (dirty pages implies there is a cap) gets a new 38 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending 39 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is 40 * moved to capsnap->dirty. (Unless a sync write is currently in 41 * progress. In that case, the capsnap is said to be "pending", new 42 * writes cannot start, and the capsnap isn't "finalized" until the 43 * write completes (or fails) and a final size/mtime for the inode for 44 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. 45 * 46 * On writeback, we must submit writes to the osd IN SNAP ORDER. So, 47 * we look for the first capsnap in i_cap_snaps and write out pages in 48 * that snap context _only_. Then we move on to the next capsnap, 49 * eventually reaching the "live" or "head" context (i.e., pages that 50 * are not yet snapped) and are writing the most recently dirtied 51 * pages. 52 * 53 * Invalidate and so forth must take care to ensure the dirty page 54 * accounting is preserved. 55 */ 56 57 #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) 58 #define CONGESTION_OFF_THRESH(congestion_kb) \ 59 (CONGESTION_ON_THRESH(congestion_kb) - \ 60 (CONGESTION_ON_THRESH(congestion_kb) >> 2)) 61 62 static inline struct ceph_snap_context *page_snap_context(struct page *page) 63 { 64 if (PagePrivate(page)) 65 return (void *)page->private; 66 return NULL; 67 } 68 69 /* 70 * Dirty a page. Optimistically adjust accounting, on the assumption 71 * that we won't race with invalidate. If we do, readjust. 72 */ 73 static int ceph_set_page_dirty(struct page *page) 74 { 75 struct address_space *mapping = page->mapping; 76 struct inode *inode; 77 struct ceph_inode_info *ci; 78 struct ceph_snap_context *snapc; 79 int ret; 80 81 if (unlikely(!mapping)) 82 return !TestSetPageDirty(page); 83 84 if (PageDirty(page)) { 85 dout("%p set_page_dirty %p idx %lu -- already dirty\n", 86 mapping->host, page, page->index); 87 BUG_ON(!PagePrivate(page)); 88 return 0; 89 } 90 91 inode = mapping->host; 92 ci = ceph_inode(inode); 93 94 /* dirty the head */ 95 spin_lock(&ci->i_ceph_lock); 96 BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference 97 if (__ceph_have_pending_cap_snap(ci)) { 98 struct ceph_cap_snap *capsnap = 99 list_last_entry(&ci->i_cap_snaps, 100 struct ceph_cap_snap, 101 ci_item); 102 snapc = ceph_get_snap_context(capsnap->context); 103 capsnap->dirty_pages++; 104 } else { 105 BUG_ON(!ci->i_head_snapc); 106 snapc = ceph_get_snap_context(ci->i_head_snapc); 107 ++ci->i_wrbuffer_ref_head; 108 } 109 if (ci->i_wrbuffer_ref == 0) 110 ihold(inode); 111 ++ci->i_wrbuffer_ref; 112 dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " 113 "snapc %p seq %lld (%d snaps)\n", 114 mapping->host, page, page->index, 115 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 116 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 117 snapc, snapc->seq, snapc->num_snaps); 118 spin_unlock(&ci->i_ceph_lock); 119 120 /* 121 * Reference snap context in page->private. Also set 122 * PagePrivate so that we get invalidatepage callback. 123 */ 124 BUG_ON(PagePrivate(page)); 125 page->private = (unsigned long)snapc; 126 SetPagePrivate(page); 127 128 ret = __set_page_dirty_nobuffers(page); 129 WARN_ON(!PageLocked(page)); 130 WARN_ON(!page->mapping); 131 132 return ret; 133 } 134 135 /* 136 * If we are truncating the full page (i.e. offset == 0), adjust the 137 * dirty page counters appropriately. Only called if there is private 138 * data on the page. 139 */ 140 static void ceph_invalidatepage(struct page *page, unsigned int offset, 141 unsigned int length) 142 { 143 struct inode *inode; 144 struct ceph_inode_info *ci; 145 struct ceph_snap_context *snapc = page_snap_context(page); 146 147 inode = page->mapping->host; 148 ci = ceph_inode(inode); 149 150 if (offset != 0 || length != PAGE_SIZE) { 151 dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", 152 inode, page, page->index, offset, length); 153 return; 154 } 155 156 ceph_invalidate_fscache_page(inode, page); 157 158 WARN_ON(!PageLocked(page)); 159 if (!PagePrivate(page)) 160 return; 161 162 ClearPageChecked(page); 163 164 dout("%p invalidatepage %p idx %lu full dirty page\n", 165 inode, page, page->index); 166 167 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 168 ceph_put_snap_context(snapc); 169 page->private = 0; 170 ClearPagePrivate(page); 171 } 172 173 static int ceph_releasepage(struct page *page, gfp_t g) 174 { 175 dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host, 176 page, page->index, PageDirty(page) ? "" : "not "); 177 178 /* Can we release the page from the cache? */ 179 if (!ceph_release_fscache_page(page, g)) 180 return 0; 181 182 return !PagePrivate(page); 183 } 184 185 /* 186 * read a single page, without unlocking it. 187 */ 188 static int ceph_do_readpage(struct file *filp, struct page *page) 189 { 190 struct inode *inode = file_inode(filp); 191 struct ceph_inode_info *ci = ceph_inode(inode); 192 struct ceph_osd_client *osdc = 193 &ceph_inode_to_client(inode)->client->osdc; 194 int err = 0; 195 u64 off = page_offset(page); 196 u64 len = PAGE_SIZE; 197 198 if (off >= i_size_read(inode)) { 199 zero_user_segment(page, 0, PAGE_SIZE); 200 SetPageUptodate(page); 201 return 0; 202 } 203 204 if (ci->i_inline_version != CEPH_INLINE_NONE) { 205 /* 206 * Uptodate inline data should have been added 207 * into page cache while getting Fcr caps. 208 */ 209 if (off == 0) 210 return -EINVAL; 211 zero_user_segment(page, 0, PAGE_SIZE); 212 SetPageUptodate(page); 213 return 0; 214 } 215 216 err = ceph_readpage_from_fscache(inode, page); 217 if (err == 0) 218 return -EINPROGRESS; 219 220 dout("readpage inode %p file %p page %p index %lu\n", 221 inode, filp, page, page->index); 222 err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, 223 off, &len, 224 ci->i_truncate_seq, ci->i_truncate_size, 225 &page, 1, 0); 226 if (err == -ENOENT) 227 err = 0; 228 if (err < 0) { 229 SetPageError(page); 230 ceph_fscache_readpage_cancel(inode, page); 231 goto out; 232 } 233 if (err < PAGE_SIZE) 234 /* zero fill remainder of page */ 235 zero_user_segment(page, err, PAGE_SIZE); 236 else 237 flush_dcache_page(page); 238 239 SetPageUptodate(page); 240 ceph_readpage_to_fscache(inode, page); 241 242 out: 243 return err < 0 ? err : 0; 244 } 245 246 static int ceph_readpage(struct file *filp, struct page *page) 247 { 248 int r = ceph_do_readpage(filp, page); 249 if (r != -EINPROGRESS) 250 unlock_page(page); 251 else 252 r = 0; 253 return r; 254 } 255 256 /* 257 * Finish an async read(ahead) op. 258 */ 259 static void finish_read(struct ceph_osd_request *req) 260 { 261 struct inode *inode = req->r_inode; 262 struct ceph_osd_data *osd_data; 263 int rc = req->r_result <= 0 ? req->r_result : 0; 264 int bytes = req->r_result >= 0 ? req->r_result : 0; 265 int num_pages; 266 int i; 267 268 dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); 269 270 /* unlock all pages, zeroing any data we didn't read */ 271 osd_data = osd_req_op_extent_osd_data(req, 0); 272 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 273 num_pages = calc_pages_for((u64)osd_data->alignment, 274 (u64)osd_data->length); 275 for (i = 0; i < num_pages; i++) { 276 struct page *page = osd_data->pages[i]; 277 278 if (rc < 0 && rc != -ENOENT) { 279 ceph_fscache_readpage_cancel(inode, page); 280 goto unlock; 281 } 282 if (bytes < (int)PAGE_SIZE) { 283 /* zero (remainder of) page */ 284 int s = bytes < 0 ? 0 : bytes; 285 zero_user_segment(page, s, PAGE_SIZE); 286 } 287 dout("finish_read %p uptodate %p idx %lu\n", inode, page, 288 page->index); 289 flush_dcache_page(page); 290 SetPageUptodate(page); 291 ceph_readpage_to_fscache(inode, page); 292 unlock: 293 unlock_page(page); 294 put_page(page); 295 bytes -= PAGE_SIZE; 296 } 297 kfree(osd_data->pages); 298 } 299 300 /* 301 * start an async read(ahead) operation. return nr_pages we submitted 302 * a read for on success, or negative error code. 303 */ 304 static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx, 305 struct list_head *page_list, int max) 306 { 307 struct ceph_osd_client *osdc = 308 &ceph_inode_to_client(inode)->client->osdc; 309 struct ceph_inode_info *ci = ceph_inode(inode); 310 struct page *page = lru_to_page(page_list); 311 struct ceph_vino vino; 312 struct ceph_osd_request *req; 313 u64 off; 314 u64 len; 315 int i; 316 struct page **pages; 317 pgoff_t next_index; 318 int nr_pages = 0; 319 int got = 0; 320 int ret = 0; 321 322 if (!rw_ctx) { 323 /* caller of readpages does not hold buffer and read caps 324 * (fadvise, madvise and readahead cases) */ 325 int want = CEPH_CAP_FILE_CACHE; 326 ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, true, &got); 327 if (ret < 0) { 328 dout("start_read %p, error getting cap\n", inode); 329 } else if (!(got & want)) { 330 dout("start_read %p, no cache cap\n", inode); 331 ret = 0; 332 } 333 if (ret <= 0) { 334 if (got) 335 ceph_put_cap_refs(ci, got); 336 while (!list_empty(page_list)) { 337 page = lru_to_page(page_list); 338 list_del(&page->lru); 339 put_page(page); 340 } 341 return ret; 342 } 343 } 344 345 off = (u64) page_offset(page); 346 347 /* count pages */ 348 next_index = page->index; 349 list_for_each_entry_reverse(page, page_list, lru) { 350 if (page->index != next_index) 351 break; 352 nr_pages++; 353 next_index++; 354 if (max && nr_pages == max) 355 break; 356 } 357 len = nr_pages << PAGE_SHIFT; 358 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, 359 off, len); 360 vino = ceph_vino(inode); 361 req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 362 0, 1, CEPH_OSD_OP_READ, 363 CEPH_OSD_FLAG_READ, NULL, 364 ci->i_truncate_seq, ci->i_truncate_size, 365 false); 366 if (IS_ERR(req)) { 367 ret = PTR_ERR(req); 368 goto out; 369 } 370 371 /* build page vector */ 372 nr_pages = calc_pages_for(0, len); 373 pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL); 374 if (!pages) { 375 ret = -ENOMEM; 376 goto out_put; 377 } 378 for (i = 0; i < nr_pages; ++i) { 379 page = list_entry(page_list->prev, struct page, lru); 380 BUG_ON(PageLocked(page)); 381 list_del(&page->lru); 382 383 dout("start_read %p adding %p idx %lu\n", inode, page, 384 page->index); 385 if (add_to_page_cache_lru(page, &inode->i_data, page->index, 386 GFP_KERNEL)) { 387 ceph_fscache_uncache_page(inode, page); 388 put_page(page); 389 dout("start_read %p add_to_page_cache failed %p\n", 390 inode, page); 391 nr_pages = i; 392 if (nr_pages > 0) { 393 len = nr_pages << PAGE_SHIFT; 394 osd_req_op_extent_update(req, 0, len); 395 break; 396 } 397 goto out_pages; 398 } 399 pages[i] = page; 400 } 401 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); 402 req->r_callback = finish_read; 403 req->r_inode = inode; 404 405 dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); 406 ret = ceph_osdc_start_request(osdc, req, false); 407 if (ret < 0) 408 goto out_pages; 409 ceph_osdc_put_request(req); 410 411 /* After adding locked pages to page cache, the inode holds cache cap. 412 * So we can drop our cap refs. */ 413 if (got) 414 ceph_put_cap_refs(ci, got); 415 416 return nr_pages; 417 418 out_pages: 419 for (i = 0; i < nr_pages; ++i) { 420 ceph_fscache_readpage_cancel(inode, pages[i]); 421 unlock_page(pages[i]); 422 } 423 ceph_put_page_vector(pages, nr_pages, false); 424 out_put: 425 ceph_osdc_put_request(req); 426 out: 427 if (got) 428 ceph_put_cap_refs(ci, got); 429 return ret; 430 } 431 432 433 /* 434 * Read multiple pages. Leave pages we don't read + unlock in page_list; 435 * the caller (VM) cleans them up. 436 */ 437 static int ceph_readpages(struct file *file, struct address_space *mapping, 438 struct list_head *page_list, unsigned nr_pages) 439 { 440 struct inode *inode = file_inode(file); 441 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 442 struct ceph_file_info *fi = file->private_data; 443 struct ceph_rw_context *rw_ctx; 444 int rc = 0; 445 int max = 0; 446 447 if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE) 448 return -EINVAL; 449 450 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, 451 &nr_pages); 452 453 if (rc == 0) 454 goto out; 455 456 rw_ctx = ceph_find_rw_context(fi); 457 max = fsc->mount_options->rsize >> PAGE_SHIFT; 458 dout("readpages %p file %p ctx %p nr_pages %d max %d\n", 459 inode, file, rw_ctx, nr_pages, max); 460 while (!list_empty(page_list)) { 461 rc = start_read(inode, rw_ctx, page_list, max); 462 if (rc < 0) 463 goto out; 464 } 465 out: 466 ceph_fscache_readpages_cancel(inode, page_list); 467 468 dout("readpages %p file %p ret %d\n", inode, file, rc); 469 return rc; 470 } 471 472 struct ceph_writeback_ctl 473 { 474 loff_t i_size; 475 u64 truncate_size; 476 u32 truncate_seq; 477 bool size_stable; 478 bool head_snapc; 479 }; 480 481 /* 482 * Get ref for the oldest snapc for an inode with dirty data... that is, the 483 * only snap context we are allowed to write back. 484 */ 485 static struct ceph_snap_context * 486 get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl, 487 struct ceph_snap_context *page_snapc) 488 { 489 struct ceph_inode_info *ci = ceph_inode(inode); 490 struct ceph_snap_context *snapc = NULL; 491 struct ceph_cap_snap *capsnap = NULL; 492 493 spin_lock(&ci->i_ceph_lock); 494 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 495 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 496 capsnap->context, capsnap->dirty_pages); 497 if (!capsnap->dirty_pages) 498 continue; 499 500 /* get i_size, truncate_{seq,size} for page_snapc? */ 501 if (snapc && capsnap->context != page_snapc) 502 continue; 503 504 if (ctl) { 505 if (capsnap->writing) { 506 ctl->i_size = i_size_read(inode); 507 ctl->size_stable = false; 508 } else { 509 ctl->i_size = capsnap->size; 510 ctl->size_stable = true; 511 } 512 ctl->truncate_size = capsnap->truncate_size; 513 ctl->truncate_seq = capsnap->truncate_seq; 514 ctl->head_snapc = false; 515 } 516 517 if (snapc) 518 break; 519 520 snapc = ceph_get_snap_context(capsnap->context); 521 if (!page_snapc || 522 page_snapc == snapc || 523 page_snapc->seq > snapc->seq) 524 break; 525 } 526 if (!snapc && ci->i_wrbuffer_ref_head) { 527 snapc = ceph_get_snap_context(ci->i_head_snapc); 528 dout(" head snapc %p has %d dirty pages\n", 529 snapc, ci->i_wrbuffer_ref_head); 530 if (ctl) { 531 ctl->i_size = i_size_read(inode); 532 ctl->truncate_size = ci->i_truncate_size; 533 ctl->truncate_seq = ci->i_truncate_seq; 534 ctl->size_stable = false; 535 ctl->head_snapc = true; 536 } 537 } 538 spin_unlock(&ci->i_ceph_lock); 539 return snapc; 540 } 541 542 static u64 get_writepages_data_length(struct inode *inode, 543 struct page *page, u64 start) 544 { 545 struct ceph_inode_info *ci = ceph_inode(inode); 546 struct ceph_snap_context *snapc = page_snap_context(page); 547 struct ceph_cap_snap *capsnap = NULL; 548 u64 end = i_size_read(inode); 549 550 if (snapc != ci->i_head_snapc) { 551 bool found = false; 552 spin_lock(&ci->i_ceph_lock); 553 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 554 if (capsnap->context == snapc) { 555 if (!capsnap->writing) 556 end = capsnap->size; 557 found = true; 558 break; 559 } 560 } 561 spin_unlock(&ci->i_ceph_lock); 562 WARN_ON(!found); 563 } 564 if (end > page_offset(page) + PAGE_SIZE) 565 end = page_offset(page) + PAGE_SIZE; 566 return end > start ? end - start : 0; 567 } 568 569 /* 570 * Write a single page, but leave the page locked. 571 * 572 * If we get a write error, set the page error bit, but still adjust the 573 * dirty page accounting (i.e., page is no longer dirty). 574 */ 575 static int writepage_nounlock(struct page *page, struct writeback_control *wbc) 576 { 577 struct inode *inode; 578 struct ceph_inode_info *ci; 579 struct ceph_fs_client *fsc; 580 struct ceph_snap_context *snapc, *oldest; 581 loff_t page_off = page_offset(page); 582 int err, len = PAGE_SIZE; 583 struct ceph_writeback_ctl ceph_wbc; 584 585 dout("writepage %p idx %lu\n", page, page->index); 586 587 inode = page->mapping->host; 588 ci = ceph_inode(inode); 589 fsc = ceph_inode_to_client(inode); 590 591 /* verify this is a writeable snap context */ 592 snapc = page_snap_context(page); 593 if (!snapc) { 594 dout("writepage %p page %p not dirty?\n", inode, page); 595 return 0; 596 } 597 oldest = get_oldest_context(inode, &ceph_wbc, snapc); 598 if (snapc->seq > oldest->seq) { 599 dout("writepage %p page %p snapc %p not writeable - noop\n", 600 inode, page, snapc); 601 /* we should only noop if called by kswapd */ 602 WARN_ON(!(current->flags & PF_MEMALLOC)); 603 ceph_put_snap_context(oldest); 604 redirty_page_for_writepage(wbc, page); 605 return 0; 606 } 607 ceph_put_snap_context(oldest); 608 609 /* is this a partial page at end of file? */ 610 if (page_off >= ceph_wbc.i_size) { 611 dout("%p page eof %llu\n", page, ceph_wbc.i_size); 612 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); 613 return 0; 614 } 615 616 if (ceph_wbc.i_size < page_off + len) 617 len = ceph_wbc.i_size - page_off; 618 619 dout("writepage %p page %p index %lu on %llu~%u snapc %p seq %lld\n", 620 inode, page, page->index, page_off, len, snapc, snapc->seq); 621 622 if (atomic_long_inc_return(&fsc->writeback_count) > 623 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) 624 set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); 625 626 set_page_writeback(page); 627 err = ceph_osdc_writepages(&fsc->client->osdc, ceph_vino(inode), 628 &ci->i_layout, snapc, page_off, len, 629 ceph_wbc.truncate_seq, 630 ceph_wbc.truncate_size, 631 &inode->i_mtime, &page, 1); 632 if (err < 0) { 633 struct writeback_control tmp_wbc; 634 if (!wbc) 635 wbc = &tmp_wbc; 636 if (err == -ERESTARTSYS) { 637 /* killed by SIGKILL */ 638 dout("writepage interrupted page %p\n", page); 639 redirty_page_for_writepage(wbc, page); 640 end_page_writeback(page); 641 return err; 642 } 643 dout("writepage setting page/mapping error %d %p\n", 644 err, page); 645 SetPageError(page); 646 mapping_set_error(&inode->i_data, err); 647 wbc->pages_skipped++; 648 } else { 649 dout("writepage cleaned page %p\n", page); 650 err = 0; /* vfs expects us to return 0 */ 651 } 652 page->private = 0; 653 ClearPagePrivate(page); 654 end_page_writeback(page); 655 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 656 ceph_put_snap_context(snapc); /* page's reference */ 657 658 if (atomic_long_dec_return(&fsc->writeback_count) < 659 CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) 660 clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); 661 662 return err; 663 } 664 665 static int ceph_writepage(struct page *page, struct writeback_control *wbc) 666 { 667 int err; 668 struct inode *inode = page->mapping->host; 669 BUG_ON(!inode); 670 ihold(inode); 671 err = writepage_nounlock(page, wbc); 672 if (err == -ERESTARTSYS) { 673 /* direct memory reclaimer was killed by SIGKILL. return 0 674 * to prevent caller from setting mapping/page error */ 675 err = 0; 676 } 677 unlock_page(page); 678 iput(inode); 679 return err; 680 } 681 682 /* 683 * lame release_pages helper. release_pages() isn't exported to 684 * modules. 685 */ 686 static void ceph_release_pages(struct page **pages, int num) 687 { 688 struct pagevec pvec; 689 int i; 690 691 pagevec_init(&pvec); 692 for (i = 0; i < num; i++) { 693 if (pagevec_add(&pvec, pages[i]) == 0) 694 pagevec_release(&pvec); 695 } 696 pagevec_release(&pvec); 697 } 698 699 /* 700 * async writeback completion handler. 701 * 702 * If we get an error, set the mapping error bit, but not the individual 703 * page error bits. 704 */ 705 static void writepages_finish(struct ceph_osd_request *req) 706 { 707 struct inode *inode = req->r_inode; 708 struct ceph_inode_info *ci = ceph_inode(inode); 709 struct ceph_osd_data *osd_data; 710 struct page *page; 711 int num_pages, total_pages = 0; 712 int i, j; 713 int rc = req->r_result; 714 struct ceph_snap_context *snapc = req->r_snapc; 715 struct address_space *mapping = inode->i_mapping; 716 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 717 bool remove_page; 718 719 dout("writepages_finish %p rc %d\n", inode, rc); 720 if (rc < 0) { 721 mapping_set_error(mapping, rc); 722 ceph_set_error_write(ci); 723 } else { 724 ceph_clear_error_write(ci); 725 } 726 727 /* 728 * We lost the cache cap, need to truncate the page before 729 * it is unlocked, otherwise we'd truncate it later in the 730 * page truncation thread, possibly losing some data that 731 * raced its way in 732 */ 733 remove_page = !(ceph_caps_issued(ci) & 734 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); 735 736 /* clean all pages */ 737 for (i = 0; i < req->r_num_ops; i++) { 738 if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) 739 break; 740 741 osd_data = osd_req_op_extent_osd_data(req, i); 742 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 743 num_pages = calc_pages_for((u64)osd_data->alignment, 744 (u64)osd_data->length); 745 total_pages += num_pages; 746 for (j = 0; j < num_pages; j++) { 747 page = osd_data->pages[j]; 748 BUG_ON(!page); 749 WARN_ON(!PageUptodate(page)); 750 751 if (atomic_long_dec_return(&fsc->writeback_count) < 752 CONGESTION_OFF_THRESH( 753 fsc->mount_options->congestion_kb)) 754 clear_bdi_congested(inode_to_bdi(inode), 755 BLK_RW_ASYNC); 756 757 ceph_put_snap_context(page_snap_context(page)); 758 page->private = 0; 759 ClearPagePrivate(page); 760 dout("unlocking %p\n", page); 761 end_page_writeback(page); 762 763 if (remove_page) 764 generic_error_remove_page(inode->i_mapping, 765 page); 766 767 unlock_page(page); 768 } 769 dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n", 770 inode, osd_data->length, rc >= 0 ? num_pages : 0); 771 772 ceph_release_pages(osd_data->pages, num_pages); 773 } 774 775 ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc); 776 777 osd_data = osd_req_op_extent_osd_data(req, 0); 778 if (osd_data->pages_from_pool) 779 mempool_free(osd_data->pages, 780 ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); 781 else 782 kfree(osd_data->pages); 783 ceph_osdc_put_request(req); 784 } 785 786 /* 787 * initiate async writeback 788 */ 789 static int ceph_writepages_start(struct address_space *mapping, 790 struct writeback_control *wbc) 791 { 792 struct inode *inode = mapping->host; 793 struct ceph_inode_info *ci = ceph_inode(inode); 794 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 795 struct ceph_vino vino = ceph_vino(inode); 796 pgoff_t index, start_index, end = -1; 797 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; 798 struct pagevec pvec; 799 int rc = 0; 800 unsigned int wsize = i_blocksize(inode); 801 struct ceph_osd_request *req = NULL; 802 struct ceph_writeback_ctl ceph_wbc; 803 bool should_loop, range_whole = false; 804 bool done = false; 805 806 dout("writepages_start %p (mode=%s)\n", inode, 807 wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 808 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 809 810 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 811 if (ci->i_wrbuffer_ref > 0) { 812 pr_warn_ratelimited( 813 "writepage_start %p %lld forced umount\n", 814 inode, ceph_ino(inode)); 815 } 816 mapping_set_error(mapping, -EIO); 817 return -EIO; /* we're in a forced umount, don't write! */ 818 } 819 if (fsc->mount_options->wsize < wsize) 820 wsize = fsc->mount_options->wsize; 821 822 pagevec_init(&pvec); 823 824 start_index = wbc->range_cyclic ? mapping->writeback_index : 0; 825 index = start_index; 826 827 retry: 828 /* find oldest snap context with dirty data */ 829 snapc = get_oldest_context(inode, &ceph_wbc, NULL); 830 if (!snapc) { 831 /* hmm, why does writepages get called when there 832 is no dirty data? */ 833 dout(" no snap context with dirty data?\n"); 834 goto out; 835 } 836 dout(" oldest snapc is %p seq %lld (%d snaps)\n", 837 snapc, snapc->seq, snapc->num_snaps); 838 839 should_loop = false; 840 if (ceph_wbc.head_snapc && snapc != last_snapc) { 841 /* where to start/end? */ 842 if (wbc->range_cyclic) { 843 index = start_index; 844 end = -1; 845 if (index > 0) 846 should_loop = true; 847 dout(" cyclic, start at %lu\n", index); 848 } else { 849 index = wbc->range_start >> PAGE_SHIFT; 850 end = wbc->range_end >> PAGE_SHIFT; 851 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 852 range_whole = true; 853 dout(" not cyclic, %lu to %lu\n", index, end); 854 } 855 } else if (!ceph_wbc.head_snapc) { 856 /* Do not respect wbc->range_{start,end}. Dirty pages 857 * in that range can be associated with newer snapc. 858 * They are not writeable until we write all dirty pages 859 * associated with 'snapc' get written */ 860 if (index > 0) 861 should_loop = true; 862 dout(" non-head snapc, range whole\n"); 863 } 864 865 ceph_put_snap_context(last_snapc); 866 last_snapc = snapc; 867 868 while (!done && index <= end) { 869 int num_ops = 0, op_idx; 870 unsigned i, pvec_pages, max_pages, locked_pages = 0; 871 struct page **pages = NULL, **data_pages; 872 mempool_t *pool = NULL; /* Becomes non-null if mempool used */ 873 struct page *page; 874 pgoff_t strip_unit_end = 0; 875 u64 offset = 0, len = 0; 876 877 max_pages = wsize >> PAGE_SHIFT; 878 879 get_more_pages: 880 pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index, 881 end, PAGECACHE_TAG_DIRTY, 882 max_pages - locked_pages); 883 dout("pagevec_lookup_range_tag got %d\n", pvec_pages); 884 if (!pvec_pages && !locked_pages) 885 break; 886 for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { 887 page = pvec.pages[i]; 888 dout("? %p idx %lu\n", page, page->index); 889 if (locked_pages == 0) 890 lock_page(page); /* first page */ 891 else if (!trylock_page(page)) 892 break; 893 894 /* only dirty pages, or our accounting breaks */ 895 if (unlikely(!PageDirty(page)) || 896 unlikely(page->mapping != mapping)) { 897 dout("!dirty or !mapping %p\n", page); 898 unlock_page(page); 899 continue; 900 } 901 /* only if matching snap context */ 902 pgsnapc = page_snap_context(page); 903 if (pgsnapc != snapc) { 904 dout("page snapc %p %lld != oldest %p %lld\n", 905 pgsnapc, pgsnapc->seq, snapc, snapc->seq); 906 if (!should_loop && 907 !ceph_wbc.head_snapc && 908 wbc->sync_mode != WB_SYNC_NONE) 909 should_loop = true; 910 unlock_page(page); 911 continue; 912 } 913 if (page_offset(page) >= ceph_wbc.i_size) { 914 dout("%p page eof %llu\n", 915 page, ceph_wbc.i_size); 916 if (ceph_wbc.size_stable || 917 page_offset(page) >= i_size_read(inode)) 918 mapping->a_ops->invalidatepage(page, 919 0, PAGE_SIZE); 920 unlock_page(page); 921 continue; 922 } 923 if (strip_unit_end && (page->index > strip_unit_end)) { 924 dout("end of strip unit %p\n", page); 925 unlock_page(page); 926 break; 927 } 928 if (PageWriteback(page)) { 929 if (wbc->sync_mode == WB_SYNC_NONE) { 930 dout("%p under writeback\n", page); 931 unlock_page(page); 932 continue; 933 } 934 dout("waiting on writeback %p\n", page); 935 wait_on_page_writeback(page); 936 } 937 938 if (!clear_page_dirty_for_io(page)) { 939 dout("%p !clear_page_dirty_for_io\n", page); 940 unlock_page(page); 941 continue; 942 } 943 944 /* 945 * We have something to write. If this is 946 * the first locked page this time through, 947 * calculate max possinle write size and 948 * allocate a page array 949 */ 950 if (locked_pages == 0) { 951 u64 objnum; 952 u64 objoff; 953 u32 xlen; 954 955 /* prepare async write request */ 956 offset = (u64)page_offset(page); 957 ceph_calc_file_object_mapping(&ci->i_layout, 958 offset, wsize, 959 &objnum, &objoff, 960 &xlen); 961 len = xlen; 962 963 num_ops = 1; 964 strip_unit_end = page->index + 965 ((len - 1) >> PAGE_SHIFT); 966 967 BUG_ON(pages); 968 max_pages = calc_pages_for(0, (u64)len); 969 pages = kmalloc_array(max_pages, 970 sizeof(*pages), 971 GFP_NOFS); 972 if (!pages) { 973 pool = fsc->wb_pagevec_pool; 974 pages = mempool_alloc(pool, GFP_NOFS); 975 BUG_ON(!pages); 976 } 977 978 len = 0; 979 } else if (page->index != 980 (offset + len) >> PAGE_SHIFT) { 981 if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS : 982 CEPH_OSD_MAX_OPS)) { 983 redirty_page_for_writepage(wbc, page); 984 unlock_page(page); 985 break; 986 } 987 988 num_ops++; 989 offset = (u64)page_offset(page); 990 len = 0; 991 } 992 993 /* note position of first page in pvec */ 994 dout("%p will write page %p idx %lu\n", 995 inode, page, page->index); 996 997 if (atomic_long_inc_return(&fsc->writeback_count) > 998 CONGESTION_ON_THRESH( 999 fsc->mount_options->congestion_kb)) { 1000 set_bdi_congested(inode_to_bdi(inode), 1001 BLK_RW_ASYNC); 1002 } 1003 1004 1005 pages[locked_pages++] = page; 1006 pvec.pages[i] = NULL; 1007 1008 len += PAGE_SIZE; 1009 } 1010 1011 /* did we get anything? */ 1012 if (!locked_pages) 1013 goto release_pvec_pages; 1014 if (i) { 1015 unsigned j, n = 0; 1016 /* shift unused page to beginning of pvec */ 1017 for (j = 0; j < pvec_pages; j++) { 1018 if (!pvec.pages[j]) 1019 continue; 1020 if (n < j) 1021 pvec.pages[n] = pvec.pages[j]; 1022 n++; 1023 } 1024 pvec.nr = n; 1025 1026 if (pvec_pages && i == pvec_pages && 1027 locked_pages < max_pages) { 1028 dout("reached end pvec, trying for more\n"); 1029 pagevec_release(&pvec); 1030 goto get_more_pages; 1031 } 1032 } 1033 1034 new_request: 1035 offset = page_offset(pages[0]); 1036 len = wsize; 1037 1038 req = ceph_osdc_new_request(&fsc->client->osdc, 1039 &ci->i_layout, vino, 1040 offset, &len, 0, num_ops, 1041 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 1042 snapc, ceph_wbc.truncate_seq, 1043 ceph_wbc.truncate_size, false); 1044 if (IS_ERR(req)) { 1045 req = ceph_osdc_new_request(&fsc->client->osdc, 1046 &ci->i_layout, vino, 1047 offset, &len, 0, 1048 min(num_ops, 1049 CEPH_OSD_SLAB_OPS), 1050 CEPH_OSD_OP_WRITE, 1051 CEPH_OSD_FLAG_WRITE, 1052 snapc, ceph_wbc.truncate_seq, 1053 ceph_wbc.truncate_size, true); 1054 BUG_ON(IS_ERR(req)); 1055 } 1056 BUG_ON(len < page_offset(pages[locked_pages - 1]) + 1057 PAGE_SIZE - offset); 1058 1059 req->r_callback = writepages_finish; 1060 req->r_inode = inode; 1061 1062 /* Format the osd request message and submit the write */ 1063 len = 0; 1064 data_pages = pages; 1065 op_idx = 0; 1066 for (i = 0; i < locked_pages; i++) { 1067 u64 cur_offset = page_offset(pages[i]); 1068 if (offset + len != cur_offset) { 1069 if (op_idx + 1 == req->r_num_ops) 1070 break; 1071 osd_req_op_extent_dup_last(req, op_idx, 1072 cur_offset - offset); 1073 dout("writepages got pages at %llu~%llu\n", 1074 offset, len); 1075 osd_req_op_extent_osd_data_pages(req, op_idx, 1076 data_pages, len, 0, 1077 !!pool, false); 1078 osd_req_op_extent_update(req, op_idx, len); 1079 1080 len = 0; 1081 offset = cur_offset; 1082 data_pages = pages + i; 1083 op_idx++; 1084 } 1085 1086 set_page_writeback(pages[i]); 1087 len += PAGE_SIZE; 1088 } 1089 1090 if (ceph_wbc.size_stable) { 1091 len = min(len, ceph_wbc.i_size - offset); 1092 } else if (i == locked_pages) { 1093 /* writepages_finish() clears writeback pages 1094 * according to the data length, so make sure 1095 * data length covers all locked pages */ 1096 u64 min_len = len + 1 - PAGE_SIZE; 1097 len = get_writepages_data_length(inode, pages[i - 1], 1098 offset); 1099 len = max(len, min_len); 1100 } 1101 dout("writepages got pages at %llu~%llu\n", offset, len); 1102 1103 osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 1104 0, !!pool, false); 1105 osd_req_op_extent_update(req, op_idx, len); 1106 1107 BUG_ON(op_idx + 1 != req->r_num_ops); 1108 1109 pool = NULL; 1110 if (i < locked_pages) { 1111 BUG_ON(num_ops <= req->r_num_ops); 1112 num_ops -= req->r_num_ops; 1113 locked_pages -= i; 1114 1115 /* allocate new pages array for next request */ 1116 data_pages = pages; 1117 pages = kmalloc_array(locked_pages, sizeof(*pages), 1118 GFP_NOFS); 1119 if (!pages) { 1120 pool = fsc->wb_pagevec_pool; 1121 pages = mempool_alloc(pool, GFP_NOFS); 1122 BUG_ON(!pages); 1123 } 1124 memcpy(pages, data_pages + i, 1125 locked_pages * sizeof(*pages)); 1126 memset(data_pages + i, 0, 1127 locked_pages * sizeof(*pages)); 1128 } else { 1129 BUG_ON(num_ops != req->r_num_ops); 1130 index = pages[i - 1]->index + 1; 1131 /* request message now owns the pages array */ 1132 pages = NULL; 1133 } 1134 1135 req->r_mtime = inode->i_mtime; 1136 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 1137 BUG_ON(rc); 1138 req = NULL; 1139 1140 wbc->nr_to_write -= i; 1141 if (pages) 1142 goto new_request; 1143 1144 /* 1145 * We stop writing back only if we are not doing 1146 * integrity sync. In case of integrity sync we have to 1147 * keep going until we have written all the pages 1148 * we tagged for writeback prior to entering this loop. 1149 */ 1150 if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) 1151 done = true; 1152 1153 release_pvec_pages: 1154 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, 1155 pvec.nr ? pvec.pages[0] : NULL); 1156 pagevec_release(&pvec); 1157 } 1158 1159 if (should_loop && !done) { 1160 /* more to do; loop back to beginning of file */ 1161 dout("writepages looping back to beginning of file\n"); 1162 end = start_index - 1; /* OK even when start_index == 0 */ 1163 1164 /* to write dirty pages associated with next snapc, 1165 * we need to wait until current writes complete */ 1166 if (wbc->sync_mode != WB_SYNC_NONE && 1167 start_index == 0 && /* all dirty pages were checked */ 1168 !ceph_wbc.head_snapc) { 1169 struct page *page; 1170 unsigned i, nr; 1171 index = 0; 1172 while ((index <= end) && 1173 (nr = pagevec_lookup_tag(&pvec, mapping, &index, 1174 PAGECACHE_TAG_WRITEBACK))) { 1175 for (i = 0; i < nr; i++) { 1176 page = pvec.pages[i]; 1177 if (page_snap_context(page) != snapc) 1178 continue; 1179 wait_on_page_writeback(page); 1180 } 1181 pagevec_release(&pvec); 1182 cond_resched(); 1183 } 1184 } 1185 1186 start_index = 0; 1187 index = 0; 1188 goto retry; 1189 } 1190 1191 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 1192 mapping->writeback_index = index; 1193 1194 out: 1195 ceph_osdc_put_request(req); 1196 ceph_put_snap_context(last_snapc); 1197 dout("writepages dend - startone, rc = %d\n", rc); 1198 return rc; 1199 } 1200 1201 1202 1203 /* 1204 * See if a given @snapc is either writeable, or already written. 1205 */ 1206 static int context_is_writeable_or_written(struct inode *inode, 1207 struct ceph_snap_context *snapc) 1208 { 1209 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL); 1210 int ret = !oldest || snapc->seq <= oldest->seq; 1211 1212 ceph_put_snap_context(oldest); 1213 return ret; 1214 } 1215 1216 /* 1217 * We are only allowed to write into/dirty the page if the page is 1218 * clean, or already dirty within the same snap context. 1219 * 1220 * called with page locked. 1221 * return success with page locked, 1222 * or any failure (incl -EAGAIN) with page unlocked. 1223 */ 1224 static int ceph_update_writeable_page(struct file *file, 1225 loff_t pos, unsigned len, 1226 struct page *page) 1227 { 1228 struct inode *inode = file_inode(file); 1229 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1230 struct ceph_inode_info *ci = ceph_inode(inode); 1231 loff_t page_off = pos & PAGE_MASK; 1232 int pos_in_page = pos & ~PAGE_MASK; 1233 int end_in_page = pos_in_page + len; 1234 loff_t i_size; 1235 int r; 1236 struct ceph_snap_context *snapc, *oldest; 1237 1238 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 1239 dout(" page %p forced umount\n", page); 1240 unlock_page(page); 1241 return -EIO; 1242 } 1243 1244 retry_locked: 1245 /* writepages currently holds page lock, but if we change that later, */ 1246 wait_on_page_writeback(page); 1247 1248 snapc = page_snap_context(page); 1249 if (snapc && snapc != ci->i_head_snapc) { 1250 /* 1251 * this page is already dirty in another (older) snap 1252 * context! is it writeable now? 1253 */ 1254 oldest = get_oldest_context(inode, NULL, NULL); 1255 if (snapc->seq > oldest->seq) { 1256 ceph_put_snap_context(oldest); 1257 dout(" page %p snapc %p not current or oldest\n", 1258 page, snapc); 1259 /* 1260 * queue for writeback, and wait for snapc to 1261 * be writeable or written 1262 */ 1263 snapc = ceph_get_snap_context(snapc); 1264 unlock_page(page); 1265 ceph_queue_writeback(inode); 1266 r = wait_event_killable(ci->i_cap_wq, 1267 context_is_writeable_or_written(inode, snapc)); 1268 ceph_put_snap_context(snapc); 1269 if (r == -ERESTARTSYS) 1270 return r; 1271 return -EAGAIN; 1272 } 1273 ceph_put_snap_context(oldest); 1274 1275 /* yay, writeable, do it now (without dropping page lock) */ 1276 dout(" page %p snapc %p not current, but oldest\n", 1277 page, snapc); 1278 if (!clear_page_dirty_for_io(page)) 1279 goto retry_locked; 1280 r = writepage_nounlock(page, NULL); 1281 if (r < 0) 1282 goto fail_unlock; 1283 goto retry_locked; 1284 } 1285 1286 if (PageUptodate(page)) { 1287 dout(" page %p already uptodate\n", page); 1288 return 0; 1289 } 1290 1291 /* full page? */ 1292 if (pos_in_page == 0 && len == PAGE_SIZE) 1293 return 0; 1294 1295 /* past end of file? */ 1296 i_size = i_size_read(inode); 1297 1298 if (page_off >= i_size || 1299 (pos_in_page == 0 && (pos+len) >= i_size && 1300 end_in_page - pos_in_page != PAGE_SIZE)) { 1301 dout(" zeroing %p 0 - %d and %d - %d\n", 1302 page, pos_in_page, end_in_page, (int)PAGE_SIZE); 1303 zero_user_segments(page, 1304 0, pos_in_page, 1305 end_in_page, PAGE_SIZE); 1306 return 0; 1307 } 1308 1309 /* we need to read it. */ 1310 r = ceph_do_readpage(file, page); 1311 if (r < 0) { 1312 if (r == -EINPROGRESS) 1313 return -EAGAIN; 1314 goto fail_unlock; 1315 } 1316 goto retry_locked; 1317 fail_unlock: 1318 unlock_page(page); 1319 return r; 1320 } 1321 1322 /* 1323 * We are only allowed to write into/dirty the page if the page is 1324 * clean, or already dirty within the same snap context. 1325 */ 1326 static int ceph_write_begin(struct file *file, struct address_space *mapping, 1327 loff_t pos, unsigned len, unsigned flags, 1328 struct page **pagep, void **fsdata) 1329 { 1330 struct inode *inode = file_inode(file); 1331 struct page *page; 1332 pgoff_t index = pos >> PAGE_SHIFT; 1333 int r; 1334 1335 do { 1336 /* get a page */ 1337 page = grab_cache_page_write_begin(mapping, index, 0); 1338 if (!page) 1339 return -ENOMEM; 1340 1341 dout("write_begin file %p inode %p page %p %d~%d\n", file, 1342 inode, page, (int)pos, (int)len); 1343 1344 r = ceph_update_writeable_page(file, pos, len, page); 1345 if (r < 0) 1346 put_page(page); 1347 else 1348 *pagep = page; 1349 } while (r == -EAGAIN); 1350 1351 return r; 1352 } 1353 1354 /* 1355 * we don't do anything in here that simple_write_end doesn't do 1356 * except adjust dirty page accounting 1357 */ 1358 static int ceph_write_end(struct file *file, struct address_space *mapping, 1359 loff_t pos, unsigned len, unsigned copied, 1360 struct page *page, void *fsdata) 1361 { 1362 struct inode *inode = file_inode(file); 1363 bool check_cap = false; 1364 1365 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, 1366 inode, page, (int)pos, (int)copied, (int)len); 1367 1368 /* zero the stale part of the page if we did a short copy */ 1369 if (!PageUptodate(page)) { 1370 if (copied < len) { 1371 copied = 0; 1372 goto out; 1373 } 1374 SetPageUptodate(page); 1375 } 1376 1377 /* did file size increase? */ 1378 if (pos+copied > i_size_read(inode)) 1379 check_cap = ceph_inode_set_size(inode, pos+copied); 1380 1381 set_page_dirty(page); 1382 1383 out: 1384 unlock_page(page); 1385 put_page(page); 1386 1387 if (check_cap) 1388 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 1389 1390 return copied; 1391 } 1392 1393 /* 1394 * we set .direct_IO to indicate direct io is supported, but since we 1395 * intercept O_DIRECT reads and writes early, this function should 1396 * never get called. 1397 */ 1398 static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter) 1399 { 1400 WARN_ON(1); 1401 return -EINVAL; 1402 } 1403 1404 const struct address_space_operations ceph_aops = { 1405 .readpage = ceph_readpage, 1406 .readpages = ceph_readpages, 1407 .writepage = ceph_writepage, 1408 .writepages = ceph_writepages_start, 1409 .write_begin = ceph_write_begin, 1410 .write_end = ceph_write_end, 1411 .set_page_dirty = ceph_set_page_dirty, 1412 .invalidatepage = ceph_invalidatepage, 1413 .releasepage = ceph_releasepage, 1414 .direct_IO = ceph_direct_io, 1415 }; 1416 1417 static void ceph_block_sigs(sigset_t *oldset) 1418 { 1419 sigset_t mask; 1420 siginitsetinv(&mask, sigmask(SIGKILL)); 1421 sigprocmask(SIG_BLOCK, &mask, oldset); 1422 } 1423 1424 static void ceph_restore_sigs(sigset_t *oldset) 1425 { 1426 sigprocmask(SIG_SETMASK, oldset, NULL); 1427 } 1428 1429 /* 1430 * vm ops 1431 */ 1432 static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) 1433 { 1434 struct vm_area_struct *vma = vmf->vma; 1435 struct inode *inode = file_inode(vma->vm_file); 1436 struct ceph_inode_info *ci = ceph_inode(inode); 1437 struct ceph_file_info *fi = vma->vm_file->private_data; 1438 struct page *pinned_page = NULL; 1439 loff_t off = vmf->pgoff << PAGE_SHIFT; 1440 int want, got, err; 1441 sigset_t oldset; 1442 vm_fault_t ret = VM_FAULT_SIGBUS; 1443 1444 ceph_block_sigs(&oldset); 1445 1446 dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", 1447 inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE); 1448 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1449 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1450 else 1451 want = CEPH_CAP_FILE_CACHE; 1452 1453 got = 0; 1454 err = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); 1455 if (err < 0) 1456 goto out_restore; 1457 1458 dout("filemap_fault %p %llu~%zd got cap refs on %s\n", 1459 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got)); 1460 1461 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 1462 ci->i_inline_version == CEPH_INLINE_NONE) { 1463 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got); 1464 ceph_add_rw_context(fi, &rw_ctx); 1465 ret = filemap_fault(vmf); 1466 ceph_del_rw_context(fi, &rw_ctx); 1467 dout("filemap_fault %p %llu~%zd drop cap refs %s ret %x\n", 1468 inode, off, (size_t)PAGE_SIZE, 1469 ceph_cap_string(got), ret); 1470 } else 1471 err = -EAGAIN; 1472 1473 if (pinned_page) 1474 put_page(pinned_page); 1475 ceph_put_cap_refs(ci, got); 1476 1477 if (err != -EAGAIN) 1478 goto out_restore; 1479 1480 /* read inline data */ 1481 if (off >= PAGE_SIZE) { 1482 /* does not support inline data > PAGE_SIZE */ 1483 ret = VM_FAULT_SIGBUS; 1484 } else { 1485 struct address_space *mapping = inode->i_mapping; 1486 struct page *page = find_or_create_page(mapping, 0, 1487 mapping_gfp_constraint(mapping, 1488 ~__GFP_FS)); 1489 if (!page) { 1490 ret = VM_FAULT_OOM; 1491 goto out_inline; 1492 } 1493 err = __ceph_do_getattr(inode, page, 1494 CEPH_STAT_CAP_INLINE_DATA, true); 1495 if (err < 0 || off >= i_size_read(inode)) { 1496 unlock_page(page); 1497 put_page(page); 1498 ret = vmf_error(err); 1499 goto out_inline; 1500 } 1501 if (err < PAGE_SIZE) 1502 zero_user_segment(page, err, PAGE_SIZE); 1503 else 1504 flush_dcache_page(page); 1505 SetPageUptodate(page); 1506 vmf->page = page; 1507 ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED; 1508 out_inline: 1509 dout("filemap_fault %p %llu~%zd read inline data ret %x\n", 1510 inode, off, (size_t)PAGE_SIZE, ret); 1511 } 1512 out_restore: 1513 ceph_restore_sigs(&oldset); 1514 if (err < 0) 1515 ret = vmf_error(err); 1516 1517 return ret; 1518 } 1519 1520 /* 1521 * Reuse write_begin here for simplicity. 1522 */ 1523 static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf) 1524 { 1525 struct vm_area_struct *vma = vmf->vma; 1526 struct inode *inode = file_inode(vma->vm_file); 1527 struct ceph_inode_info *ci = ceph_inode(inode); 1528 struct ceph_file_info *fi = vma->vm_file->private_data; 1529 struct ceph_cap_flush *prealloc_cf; 1530 struct page *page = vmf->page; 1531 loff_t off = page_offset(page); 1532 loff_t size = i_size_read(inode); 1533 size_t len; 1534 int want, got, err; 1535 sigset_t oldset; 1536 vm_fault_t ret = VM_FAULT_SIGBUS; 1537 1538 prealloc_cf = ceph_alloc_cap_flush(); 1539 if (!prealloc_cf) 1540 return VM_FAULT_OOM; 1541 1542 ceph_block_sigs(&oldset); 1543 1544 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1545 struct page *locked_page = NULL; 1546 if (off == 0) { 1547 lock_page(page); 1548 locked_page = page; 1549 } 1550 err = ceph_uninline_data(vma->vm_file, locked_page); 1551 if (locked_page) 1552 unlock_page(locked_page); 1553 if (err < 0) 1554 goto out_free; 1555 } 1556 1557 if (off + PAGE_SIZE <= size) 1558 len = PAGE_SIZE; 1559 else 1560 len = size & ~PAGE_MASK; 1561 1562 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 1563 inode, ceph_vinop(inode), off, len, size); 1564 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1565 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1566 else 1567 want = CEPH_CAP_FILE_BUFFER; 1568 1569 got = 0; 1570 err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len, 1571 &got, NULL); 1572 if (err < 0) 1573 goto out_free; 1574 1575 dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", 1576 inode, off, len, ceph_cap_string(got)); 1577 1578 /* Update time before taking page lock */ 1579 file_update_time(vma->vm_file); 1580 inode_inc_iversion_raw(inode); 1581 1582 do { 1583 lock_page(page); 1584 1585 if ((off > size) || (page->mapping != inode->i_mapping)) { 1586 unlock_page(page); 1587 ret = VM_FAULT_NOPAGE; 1588 break; 1589 } 1590 1591 err = ceph_update_writeable_page(vma->vm_file, off, len, page); 1592 if (err >= 0) { 1593 /* success. we'll keep the page locked. */ 1594 set_page_dirty(page); 1595 ret = VM_FAULT_LOCKED; 1596 } 1597 } while (err == -EAGAIN); 1598 1599 if (ret == VM_FAULT_LOCKED || 1600 ci->i_inline_version != CEPH_INLINE_NONE) { 1601 int dirty; 1602 spin_lock(&ci->i_ceph_lock); 1603 ci->i_inline_version = CEPH_INLINE_NONE; 1604 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1605 &prealloc_cf); 1606 spin_unlock(&ci->i_ceph_lock); 1607 if (dirty) 1608 __mark_inode_dirty(inode, dirty); 1609 } 1610 1611 dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n", 1612 inode, off, len, ceph_cap_string(got), ret); 1613 ceph_put_cap_refs(ci, got); 1614 out_free: 1615 ceph_restore_sigs(&oldset); 1616 ceph_free_cap_flush(prealloc_cf); 1617 if (err < 0) 1618 ret = vmf_error(err); 1619 return ret; 1620 } 1621 1622 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, 1623 char *data, size_t len) 1624 { 1625 struct address_space *mapping = inode->i_mapping; 1626 struct page *page; 1627 1628 if (locked_page) { 1629 page = locked_page; 1630 } else { 1631 if (i_size_read(inode) == 0) 1632 return; 1633 page = find_or_create_page(mapping, 0, 1634 mapping_gfp_constraint(mapping, 1635 ~__GFP_FS)); 1636 if (!page) 1637 return; 1638 if (PageUptodate(page)) { 1639 unlock_page(page); 1640 put_page(page); 1641 return; 1642 } 1643 } 1644 1645 dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 1646 inode, ceph_vinop(inode), len, locked_page); 1647 1648 if (len > 0) { 1649 void *kaddr = kmap_atomic(page); 1650 memcpy(kaddr, data, len); 1651 kunmap_atomic(kaddr); 1652 } 1653 1654 if (page != locked_page) { 1655 if (len < PAGE_SIZE) 1656 zero_user_segment(page, len, PAGE_SIZE); 1657 else 1658 flush_dcache_page(page); 1659 1660 SetPageUptodate(page); 1661 unlock_page(page); 1662 put_page(page); 1663 } 1664 } 1665 1666 int ceph_uninline_data(struct file *filp, struct page *locked_page) 1667 { 1668 struct inode *inode = file_inode(filp); 1669 struct ceph_inode_info *ci = ceph_inode(inode); 1670 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1671 struct ceph_osd_request *req; 1672 struct page *page = NULL; 1673 u64 len, inline_version; 1674 int err = 0; 1675 bool from_pagecache = false; 1676 1677 spin_lock(&ci->i_ceph_lock); 1678 inline_version = ci->i_inline_version; 1679 spin_unlock(&ci->i_ceph_lock); 1680 1681 dout("uninline_data %p %llx.%llx inline_version %llu\n", 1682 inode, ceph_vinop(inode), inline_version); 1683 1684 if (inline_version == 1 || /* initial version, no data */ 1685 inline_version == CEPH_INLINE_NONE) 1686 goto out; 1687 1688 if (locked_page) { 1689 page = locked_page; 1690 WARN_ON(!PageUptodate(page)); 1691 } else if (ceph_caps_issued(ci) & 1692 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) { 1693 page = find_get_page(inode->i_mapping, 0); 1694 if (page) { 1695 if (PageUptodate(page)) { 1696 from_pagecache = true; 1697 lock_page(page); 1698 } else { 1699 put_page(page); 1700 page = NULL; 1701 } 1702 } 1703 } 1704 1705 if (page) { 1706 len = i_size_read(inode); 1707 if (len > PAGE_SIZE) 1708 len = PAGE_SIZE; 1709 } else { 1710 page = __page_cache_alloc(GFP_NOFS); 1711 if (!page) { 1712 err = -ENOMEM; 1713 goto out; 1714 } 1715 err = __ceph_do_getattr(inode, page, 1716 CEPH_STAT_CAP_INLINE_DATA, true); 1717 if (err < 0) { 1718 /* no inline data */ 1719 if (err == -ENODATA) 1720 err = 0; 1721 goto out; 1722 } 1723 len = err; 1724 } 1725 1726 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1727 ceph_vino(inode), 0, &len, 0, 1, 1728 CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE, 1729 NULL, 0, 0, false); 1730 if (IS_ERR(req)) { 1731 err = PTR_ERR(req); 1732 goto out; 1733 } 1734 1735 req->r_mtime = inode->i_mtime; 1736 err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1737 if (!err) 1738 err = ceph_osdc_wait_request(&fsc->client->osdc, req); 1739 ceph_osdc_put_request(req); 1740 if (err < 0) 1741 goto out; 1742 1743 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1744 ceph_vino(inode), 0, &len, 1, 3, 1745 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 1746 NULL, ci->i_truncate_seq, 1747 ci->i_truncate_size, false); 1748 if (IS_ERR(req)) { 1749 err = PTR_ERR(req); 1750 goto out; 1751 } 1752 1753 osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false); 1754 1755 { 1756 __le64 xattr_buf = cpu_to_le64(inline_version); 1757 err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR, 1758 "inline_version", &xattr_buf, 1759 sizeof(xattr_buf), 1760 CEPH_OSD_CMPXATTR_OP_GT, 1761 CEPH_OSD_CMPXATTR_MODE_U64); 1762 if (err) 1763 goto out_put; 1764 } 1765 1766 { 1767 char xattr_buf[32]; 1768 int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf), 1769 "%llu", inline_version); 1770 err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR, 1771 "inline_version", 1772 xattr_buf, xattr_len, 0, 0); 1773 if (err) 1774 goto out_put; 1775 } 1776 1777 req->r_mtime = inode->i_mtime; 1778 err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1779 if (!err) 1780 err = ceph_osdc_wait_request(&fsc->client->osdc, req); 1781 out_put: 1782 ceph_osdc_put_request(req); 1783 if (err == -ECANCELED) 1784 err = 0; 1785 out: 1786 if (page && page != locked_page) { 1787 if (from_pagecache) { 1788 unlock_page(page); 1789 put_page(page); 1790 } else 1791 __free_pages(page, 0); 1792 } 1793 1794 dout("uninline_data %p %llx.%llx inline_version %llu = %d\n", 1795 inode, ceph_vinop(inode), inline_version, err); 1796 return err; 1797 } 1798 1799 static const struct vm_operations_struct ceph_vmops = { 1800 .fault = ceph_filemap_fault, 1801 .page_mkwrite = ceph_page_mkwrite, 1802 }; 1803 1804 int ceph_mmap(struct file *file, struct vm_area_struct *vma) 1805 { 1806 struct address_space *mapping = file->f_mapping; 1807 1808 if (!mapping->a_ops->readpage) 1809 return -ENOEXEC; 1810 file_accessed(file); 1811 vma->vm_ops = &ceph_vmops; 1812 return 0; 1813 } 1814 1815 enum { 1816 POOL_READ = 1, 1817 POOL_WRITE = 2, 1818 }; 1819 1820 static int __ceph_pool_perm_get(struct ceph_inode_info *ci, 1821 s64 pool, struct ceph_string *pool_ns) 1822 { 1823 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); 1824 struct ceph_mds_client *mdsc = fsc->mdsc; 1825 struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; 1826 struct rb_node **p, *parent; 1827 struct ceph_pool_perm *perm; 1828 struct page **pages; 1829 size_t pool_ns_len; 1830 int err = 0, err2 = 0, have = 0; 1831 1832 down_read(&mdsc->pool_perm_rwsem); 1833 p = &mdsc->pool_perm_tree.rb_node; 1834 while (*p) { 1835 perm = rb_entry(*p, struct ceph_pool_perm, node); 1836 if (pool < perm->pool) 1837 p = &(*p)->rb_left; 1838 else if (pool > perm->pool) 1839 p = &(*p)->rb_right; 1840 else { 1841 int ret = ceph_compare_string(pool_ns, 1842 perm->pool_ns, 1843 perm->pool_ns_len); 1844 if (ret < 0) 1845 p = &(*p)->rb_left; 1846 else if (ret > 0) 1847 p = &(*p)->rb_right; 1848 else { 1849 have = perm->perm; 1850 break; 1851 } 1852 } 1853 } 1854 up_read(&mdsc->pool_perm_rwsem); 1855 if (*p) 1856 goto out; 1857 1858 if (pool_ns) 1859 dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n", 1860 pool, (int)pool_ns->len, pool_ns->str); 1861 else 1862 dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool); 1863 1864 down_write(&mdsc->pool_perm_rwsem); 1865 p = &mdsc->pool_perm_tree.rb_node; 1866 parent = NULL; 1867 while (*p) { 1868 parent = *p; 1869 perm = rb_entry(parent, struct ceph_pool_perm, node); 1870 if (pool < perm->pool) 1871 p = &(*p)->rb_left; 1872 else if (pool > perm->pool) 1873 p = &(*p)->rb_right; 1874 else { 1875 int ret = ceph_compare_string(pool_ns, 1876 perm->pool_ns, 1877 perm->pool_ns_len); 1878 if (ret < 0) 1879 p = &(*p)->rb_left; 1880 else if (ret > 0) 1881 p = &(*p)->rb_right; 1882 else { 1883 have = perm->perm; 1884 break; 1885 } 1886 } 1887 } 1888 if (*p) { 1889 up_write(&mdsc->pool_perm_rwsem); 1890 goto out; 1891 } 1892 1893 rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1894 1, false, GFP_NOFS); 1895 if (!rd_req) { 1896 err = -ENOMEM; 1897 goto out_unlock; 1898 } 1899 1900 rd_req->r_flags = CEPH_OSD_FLAG_READ; 1901 osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0); 1902 rd_req->r_base_oloc.pool = pool; 1903 if (pool_ns) 1904 rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns); 1905 ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino); 1906 1907 err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS); 1908 if (err) 1909 goto out_unlock; 1910 1911 wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1912 1, false, GFP_NOFS); 1913 if (!wr_req) { 1914 err = -ENOMEM; 1915 goto out_unlock; 1916 } 1917 1918 wr_req->r_flags = CEPH_OSD_FLAG_WRITE; 1919 osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL); 1920 ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc); 1921 ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid); 1922 1923 err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS); 1924 if (err) 1925 goto out_unlock; 1926 1927 /* one page should be large enough for STAT data */ 1928 pages = ceph_alloc_page_vector(1, GFP_KERNEL); 1929 if (IS_ERR(pages)) { 1930 err = PTR_ERR(pages); 1931 goto out_unlock; 1932 } 1933 1934 osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE, 1935 0, false, true); 1936 err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); 1937 1938 wr_req->r_mtime = ci->vfs_inode.i_mtime; 1939 err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); 1940 1941 if (!err) 1942 err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req); 1943 if (!err2) 1944 err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req); 1945 1946 if (err >= 0 || err == -ENOENT) 1947 have |= POOL_READ; 1948 else if (err != -EPERM) 1949 goto out_unlock; 1950 1951 if (err2 == 0 || err2 == -EEXIST) 1952 have |= POOL_WRITE; 1953 else if (err2 != -EPERM) { 1954 err = err2; 1955 goto out_unlock; 1956 } 1957 1958 pool_ns_len = pool_ns ? pool_ns->len : 0; 1959 perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS); 1960 if (!perm) { 1961 err = -ENOMEM; 1962 goto out_unlock; 1963 } 1964 1965 perm->pool = pool; 1966 perm->perm = have; 1967 perm->pool_ns_len = pool_ns_len; 1968 if (pool_ns_len > 0) 1969 memcpy(perm->pool_ns, pool_ns->str, pool_ns_len); 1970 perm->pool_ns[pool_ns_len] = 0; 1971 1972 rb_link_node(&perm->node, parent, p); 1973 rb_insert_color(&perm->node, &mdsc->pool_perm_tree); 1974 err = 0; 1975 out_unlock: 1976 up_write(&mdsc->pool_perm_rwsem); 1977 1978 ceph_osdc_put_request(rd_req); 1979 ceph_osdc_put_request(wr_req); 1980 out: 1981 if (!err) 1982 err = have; 1983 if (pool_ns) 1984 dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n", 1985 pool, (int)pool_ns->len, pool_ns->str, err); 1986 else 1987 dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err); 1988 return err; 1989 } 1990 1991 int ceph_pool_perm_check(struct ceph_inode_info *ci, int need) 1992 { 1993 s64 pool; 1994 struct ceph_string *pool_ns; 1995 int ret, flags; 1996 1997 if (ci->i_vino.snap != CEPH_NOSNAP) { 1998 /* 1999 * Pool permission check needs to write to the first object. 2000 * But for snapshot, head of the first object may have alread 2001 * been deleted. Skip check to avoid creating orphan object. 2002 */ 2003 return 0; 2004 } 2005 2006 if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode), 2007 NOPOOLPERM)) 2008 return 0; 2009 2010 spin_lock(&ci->i_ceph_lock); 2011 flags = ci->i_ceph_flags; 2012 pool = ci->i_layout.pool_id; 2013 spin_unlock(&ci->i_ceph_lock); 2014 check: 2015 if (flags & CEPH_I_POOL_PERM) { 2016 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) { 2017 dout("ceph_pool_perm_check pool %lld no read perm\n", 2018 pool); 2019 return -EPERM; 2020 } 2021 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) { 2022 dout("ceph_pool_perm_check pool %lld no write perm\n", 2023 pool); 2024 return -EPERM; 2025 } 2026 return 0; 2027 } 2028 2029 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); 2030 ret = __ceph_pool_perm_get(ci, pool, pool_ns); 2031 ceph_put_string(pool_ns); 2032 if (ret < 0) 2033 return ret; 2034 2035 flags = CEPH_I_POOL_PERM; 2036 if (ret & POOL_READ) 2037 flags |= CEPH_I_POOL_RD; 2038 if (ret & POOL_WRITE) 2039 flags |= CEPH_I_POOL_WR; 2040 2041 spin_lock(&ci->i_ceph_lock); 2042 if (pool == ci->i_layout.pool_id && 2043 pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) { 2044 ci->i_ceph_flags |= flags; 2045 } else { 2046 pool = ci->i_layout.pool_id; 2047 flags = ci->i_ceph_flags; 2048 } 2049 spin_unlock(&ci->i_ceph_lock); 2050 goto check; 2051 } 2052 2053 void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc) 2054 { 2055 struct ceph_pool_perm *perm; 2056 struct rb_node *n; 2057 2058 while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) { 2059 n = rb_first(&mdsc->pool_perm_tree); 2060 perm = rb_entry(n, struct ceph_pool_perm, node); 2061 rb_erase(n, &mdsc->pool_perm_tree); 2062 kfree(perm); 2063 } 2064 } 2065