// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and writing the most recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
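
/*
 * Worked example for the thresholds above (editor's illustration; the
 * numbers are not defaults taken from the code): with 4 KiB pages
 * (PAGE_SHIFT == 12) and congestion_kb = 8192, CONGESTION_ON_THRESH is
 * 8192 >> 2 = 2048 dirty pages, and CONGESTION_OFF_THRESH backs off by a
 * quarter of that to 2048 - 512 = 1536 pages before the bdi is marked
 * uncongested again.
 */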

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct page *page, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	attach_page_private(page, snapc);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	wait_on_page_fscache(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != thp_size(page)) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	WARN_ON(!PageLocked(page));
	if (!PagePrivate(page))
		return;

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	snapc = detach_page_private(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
}
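
/*
 * Illustrative summary (editor's sketch, derived only from the two
 * functions above): the snap context reference taken when a page is
 * dirtied travels with the page and is dropped exactly once, by
 * whichever path cleans the page first:
 *
 *	attach_page_private(page, snapc);	// set_page_dirty: take ref
 *	...
 *	snapc = detach_page_private(page);	// invalidate or writeback
 *	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
 *	ceph_put_snap_context(snapc);		// drop the page's ref
 */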
"" : "not "); 179 180 if (PageFsCache(page)) { 181 if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) 182 return 0; 183 wait_on_page_fscache(page); 184 } 185 return !PagePrivate(page); 186 } 187 188 static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq) 189 { 190 struct inode *inode = rreq->mapping->host; 191 struct ceph_inode_info *ci = ceph_inode(inode); 192 struct ceph_file_layout *lo = &ci->i_layout; 193 u32 blockoff; 194 u64 blockno; 195 196 /* Expand the start downward */ 197 blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff); 198 rreq->start = blockno * lo->stripe_unit; 199 rreq->len += blockoff; 200 201 /* Now, round up the length to the next block */ 202 rreq->len = roundup(rreq->len, lo->stripe_unit); 203 } 204 205 static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq) 206 { 207 struct inode *inode = subreq->rreq->mapping->host; 208 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 209 struct ceph_inode_info *ci = ceph_inode(inode); 210 u64 objno, objoff; 211 u32 xlen; 212 213 /* Truncate the extent at the end of the current block */ 214 ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len, 215 &objno, &objoff, &xlen); 216 subreq->len = min(xlen, fsc->mount_options->rsize); 217 return true; 218 } 219 220 static void finish_netfs_read(struct ceph_osd_request *req) 221 { 222 struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode); 223 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 224 struct netfs_read_subrequest *subreq = req->r_priv; 225 int num_pages; 226 int err = req->r_result; 227 228 ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency, 229 req->r_end_latency, err); 230 231 dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result, 232 subreq->len, i_size_read(req->r_inode)); 233 234 /* no object means success but no data */ 235 if (err == -ENOENT) 236 err = 0; 237 else if (err == -EBLOCKLISTED) 238 fsc->blocklisted = true; 239 240 if (err >= 0 && err < subreq->len) 241 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 242 243 netfs_subreq_terminated(subreq, err, true); 244 245 num_pages = calc_pages_for(osd_data->alignment, osd_data->length); 246 ceph_put_page_vector(osd_data->pages, num_pages, false); 247 iput(req->r_inode); 248 } 249 250 static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq) 251 { 252 struct netfs_read_request *rreq = subreq->rreq; 253 struct inode *inode = rreq->mapping->host; 254 struct ceph_inode_info *ci = ceph_inode(inode); 255 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 256 struct ceph_osd_request *req; 257 struct ceph_vino vino = ceph_vino(inode); 258 struct iov_iter iter; 259 struct page **pages; 260 size_t page_off; 261 int err = 0; 262 u64 len = subreq->len; 263 264 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len, 265 0, 1, CEPH_OSD_OP_READ, 266 CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica, 267 NULL, ci->i_truncate_seq, ci->i_truncate_size, false); 268 if (IS_ERR(req)) { 269 err = PTR_ERR(req); 270 req = NULL; 271 goto out; 272 } 273 274 dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len); 275 iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len); 276 err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off); 277 if (err < 0) { 278 dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err); 279 goto out; 280 } 281 282 /* should always give us a 

static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
{
	struct inode *inode = subreq->rreq->mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 objno, objoff;
	u32 xlen;

	/* Truncate the extent at the end of the current block */
	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
				      &objno, &objoff, &xlen);
	subreq->len = min(xlen, fsc->mount_options->rsize);
	return true;
}
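
/*
 * Worked example for the clamp above (editor's illustration, assuming a
 * layout where stripe_unit == object_size == 4 MiB): a 2 MiB subrequest
 * starting at file offset 3 MiB maps to objoff = 3 MiB within object 0,
 * so ceph_calc_file_object_mapping() returns xlen = 1 MiB and the
 * subrequest is truncated at the object boundary; the rsize mount
 * option caps it further only if it is smaller.
 */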

static void finish_netfs_read(struct ceph_osd_request *req)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct netfs_read_subrequest *subreq = req->r_priv;
	int num_pages;
	int err = req->r_result;

	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, err);

	dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
	     subreq->len, i_size_read(req->r_inode));

	/* no object means success but no data */
	if (err == -ENOENT)
		err = 0;
	else if (err == -EBLOCKLISTED)
		fsc->blocklisted = true;

	if (err >= 0 && err < subreq->len)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err, true);

	num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
	ceph_put_page_vector(osd_data->pages, num_pages, false);
	iput(req->r_inode);
}

static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct inode *inode = rreq->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct ceph_vino vino = ceph_vino(inode);
	struct iov_iter iter;
	struct page **pages;
	size_t page_off;
	int err = 0;
	u64 len = subreq->len;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
				    NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
	err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
	if (err < 0) {
		dout("%s: iov_iter_get_pages_alloc returned %d\n", __func__, err);
		goto out;
	}

	/* should always give us a page-aligned read */
	WARN_ON_ONCE(page_off);
	len = err;

	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	err = ceph_osdc_start_request(req->r_osdc, req, false);
	if (err)
		iput(inode);
out:
	ceph_osdc_put_request(req);
	if (err)
		netfs_subreq_terminated(subreq, err, false);
	dout("%s: result %d\n", __func__, err);
}

static void ceph_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
}

static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int got = (uintptr_t)priv;

	if (got)
		ceph_put_cap_refs(ci, got);
}

const struct netfs_read_request_ops ceph_netfs_read_ops = {
	.init_rreq		= ceph_init_rreq,
	.is_cache_enabled	= ceph_is_cache_enabled,
	.begin_cache_operation	= ceph_begin_cache_operation,
	.issue_op		= ceph_netfs_issue_op,
	.expand_readahead	= ceph_netfs_expand_readahead,
	.clamp_length		= ceph_netfs_clamp_length,
	.check_write_begin	= ceph_netfs_check_write_begin,
	.cleanup		= ceph_readahead_cleanup,
};
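
/*
 * How the netfs helper library drives the hooks above (editor's summary
 * of the flow, not additional API): for a readahead, expand_readahead
 * runs first to align the request to stripe units, clamp_length then
 * splits it into per-object subrequests, issue_op sends each
 * subrequest to the OSDs (completing via finish_netfs_read), and
 * cleanup releases any capability refs that ceph_readahead() took and
 * stashed in the private pointer.
 */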

/* read a single page, without unlocking it. */
static int ceph_readpage(struct file *file, struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vino vino = ceph_vino(inode);
	u64 off = page_offset(page);
	u64 len = thp_size(page);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0) {
			unlock_page(page);
			return -EINVAL;
		}
		zero_user_segment(page, 0, thp_size(page));
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	dout("readpage ino %llx.%llx file %p off %llu len %llu page %p index %lu\n",
	     vino.ino, vino.snap, file, off, len, page, page->index);

	return netfs_readpage(file, page, &ceph_netfs_read_ops, NULL);
}

static void ceph_readahead(struct readahead_control *ractl)
{
	struct inode *inode = file_inode(ractl->file);
	struct ceph_file_info *fi = ractl->file->private_data;
	struct ceph_rw_context *rw_ctx;
	int got = 0;
	int ret = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return;

	rw_ctx = ceph_find_rw_context(fi);
	if (!rw_ctx) {
		/*
		 * readahead callers do not necessarily hold Fcb caps
		 * (e.g. fadvise, madvise).
		 */
		int want = CEPH_CAP_FILE_CACHE;

		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
		if (ret < 0)
			dout("start_read %p, error getting cap\n", inode);
		else if (!(got & want))
			dout("start_read %p, no cache cap\n", inode);

		if (ret <= 0)
			return;
	}
	netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
}

struct ceph_writeback_ctl
{
	loff_t	i_size;
	u64	truncate_size;
	u32	truncate_seq;
	bool	size_stable;
	bool	head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
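
/*
 * Example of the ordering rule this implements (editor's illustration
 * with made-up seq numbers): if capsnaps with snapc seq 5 and seq 8
 * both hold dirty pages, get_oldest_context() returns the seq-5
 * context, and all of its pages must reach the OSDs before any seq-8
 * (or head) pages are written, per the IN SNAP ORDER rule in the
 * comment at the top of this file.
 */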

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + thp_size(page))
		end = page_offset(page) + thp_size(page);
	return end > start ? end - start : 0;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = thp_size(page);
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;

	dout("writepage %p idx %lu\n", page, page->index);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, thp_size(page));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	set_page_writeback(page);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
				    true);
	if (IS_ERR(req)) {
		redirty_page_for_writepage(wbc, page);
		end_page_writeback(page);
		return PTR_ERR(req);
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > thp_size(page));
	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
	dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(osdc, req, true);
	if (!err)
		err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, err);

	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	oldest = detach_page_private(page);
	WARN_ON_ONCE(oldest != snapc);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	return err;
}
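
/*
 * Note (editor's): writepage_nounlock() is the synchronous,
 * single-page path. It is reached from ->writepage() below (e.g. under
 * memory reclaim) and from ceph_find_incompatible(), which uses it to
 * flush an old-but-writeable snap context without dropping the page
 * lock; the batched async path is ceph_writepages_start().
 */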

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, rc);

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			    CONGESTION_OFF_THRESH(
				    fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);
			dout("unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}
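
/*
 * Note (editor's): the pages array freed above pairs with the
 * allocation in ceph_writepages_start() below, which falls back to
 * ceph_wb_pagevec_pool when kmalloc_array() fails; pages_from_pool
 * records which allocator was used so this free path can match it.
 */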

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}.  Dirty pages
		 * in that range can be associated with a newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' have been written. */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						      end, PAGECACHE_TAG_DIRTY);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    page_offset(page) >= i_size_read(inode)) &&
				    clear_page_dirty_for_io(page))
					mapping->a_ops->invalidatepage(page,
								0, thp_size(page));
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
						CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(inode_to_bdi(inode),
						  BLK_RW_ASYNC);
			}

			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += thp_size(page);
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			unsigned j, n = 0;
			/* shift unused page to beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0, num_ops,
					    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					    snapc, ceph_wbc.truncate_seq,
					    ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						    &ci->i_layout, vino,
						    offset, &len, 0,
						    min(num_ops,
							CEPH_OSD_SLAB_OPS),
						    CEPH_OSD_OP_WRITE,
						    CEPH_OSD_FLAG_WRITE,
						    snapc, ceph_wbc.truncate_seq,
						    ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     thp_size(page) - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			if (offset + len != cur_offset) {
				if (op_idx + 1 == req->r_num_ops)
					break;
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			len += thp_size(page);
		}

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - thp_size(page);
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}
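
/*
 * Example for the check above (editor's illustration with made-up seq
 * numbers): with dirty capsnaps at seq 5 and seq 8, a snapc with seq 5
 * is "writeable or written" (5 <= oldest->seq), while one with seq 8
 * must wait until all seq-5 pages have been flushed and the oldest
 * context moves forward.
 */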

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context.  Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		return ERR_PTR(-EIO);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		wait_on_page_writeback(page);

		snapc = page_snap_context(page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n", page, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n", page, snapc);
		if (clear_page_dirty_for_io(page)) {
			int r = writepage_nounlock(page, NULL);
			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct page *page, void **_fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	snapc = ceph_find_incompatible(page);
	if (snapc) {
		int r;

		unlock_page(page);
		put_page(page);
		if (IS_ERR(snapc))
			return PTR_ERR(snapc);

		ceph_queue_writeback(inode);
		r = wait_event_killable(ci->i_cap_wq,
					context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
		return r == 0 ? -EAGAIN : r;
	}
	return 0;
}
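
/*
 * Note (editor's, based on the netfs write_begin flow as I understand
 * it): returning -EAGAIN above asks the netfs helper to drop the page
 * and retry the whole begin operation with a fresh one, by which time
 * the conflicting snap context should have been written out by the
 * writeback queued here.
 */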

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = NULL;
	pgoff_t index = pos >> PAGE_SHIFT;
	int r;

	/*
	 * Uninlining should have already been done and everything updated, EXCEPT
	 * for inline_version sent to the MDS.
	 */
	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		page = grab_cache_page_write_begin(mapping, index, flags);
		if (!page)
			return -ENOMEM;

		/*
		 * The inline_version on a new inode is set to 1. If that's the
		 * case, then the page is brand new and isn't yet Uptodate.
		 */
		r = 0;
		if (index == 0 && ci->i_inline_version != 1) {
			if (!PageUptodate(page)) {
				WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n",
					  ci->i_inline_version);
				r = -EINVAL;
			}
			goto out;
		}
		zero_user_segment(page, 0, thp_size(page));
		SetPageUptodate(page);
		goto out;
	}

	r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &page, NULL,
			      &ceph_netfs_read_ops, NULL);
out:
	if (r == 0)
		wait_on_page_fscache(page);
	if (r < 0) {
		if (page)
			put_page(page);
	} else {
		WARN_ON_ONCE(!PageLocked(page));
		*pagep = page;
	}
	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}
		SetPageUptodate(page);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	set_page_dirty(page);

out:
	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readahead = ceph_readahead,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};

static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
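
/*
 * Note (editor's): siginitsetinv() builds the complement of the given
 * mask, so ceph_block_sigs() blocks every signal except SIGKILL while a
 * fault is in flight; only a fatal SIGKILL can interrupt the cap waits
 * in the two fault handlers below, and ceph_restore_sigs() puts the
 * original mask back.
 */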

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
	     inode, ceph_vinop(inode), off);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
	if (err < 0)
		goto out_restore;

	dout("filemap_fault %p %llu got cap refs on %s\n",
	     inode, off, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
		     inode, off, ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu read inline data ret %x\n",
		     inode, off, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}

static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		err = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (err < 0)
			goto out_free;
	}

	if (off + thp_size(page) <= size)
		len = thp_size(page);
	else
		len = offset_in_thp(page, size);

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
	if (err < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (!snapc) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
			break;
		}

		unlock_page(page);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs_async(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}

int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;

	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				put_page(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, err);

out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}
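
/*
 * Note (editor's): the three-op request above is what makes uninlining
 * safe against races. Op 0 is a CMPXATTR guard (mode U64, op GT) that
 * only lets the request proceed if our inline_version is greater than
 * the "inline_version" xattr already stored on the object, op 1 writes
 * the data, and op 2 records the new version. A racing uninline loses
 * the guard and fails with -ECANCELED, which is treated as success
 * above since the data already made it out.
 */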

static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						      perm->pool_ns,
						      perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						      perm->pool_ns,
						      perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLOCKLISTED)
			fsc->blocklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}
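
/*
 * Worked example for the probe above (editor's illustration): a client
 * whose OSD caps grant read but not write on the data pool sees the
 * STAT succeed (or return -ENOENT, also fine) while the exclusive
 * CREATE fails with -EPERM, so have ends up as POOL_READ only. The
 * result is cached in pool_perm_tree, so the pair of probe requests is
 * sent at most once per pool/namespace.
 */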

int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	/* Only need to do this for regular files */
	if (!S_ISREG(inode->i_mode))
		return 0;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have already
		 * been deleted. Skip check to avoid creating orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}