// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and write the most recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
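/*
 * Worked example of the thresholds above (assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12, and a hypothetical congestion_kb of 16384):
 *
 *	CONGESTION_ON_THRESH(16384)  = 16384 >> 2         = 4096 pages
 *	CONGESTION_OFF_THRESH(16384) = 4096 - (4096 >> 2) = 3072 pages
 *
 * i.e. writeback is flagged congested once 4096 pages are in flight,
 * and the flag is only cleared again below 3/4 of that, giving the
 * on/off signal some hysteresis.
 */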
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio *folio, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (folio_test_dirty(folio)) {
		dout("%p dirty_folio %p idx %lu -- already dirty\n",
		     mapping->host, folio, folio->index);
		BUG_ON(!folio_get_private(folio));
		return false;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, folio, folio->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in folio->private.  Also set
	 * PagePrivate so that we get invalidate_folio callback.
	 */
	BUG_ON(folio_get_private(folio));
	folio_attach_private(folio, snapc);

	return ceph_fscache_dirty_folio(mapping, folio);
}

/*
 * If we are truncating the full folio (i.e. offset == 0), adjust the
 * dirty folio counters appropriately.  Only called if there is private
 * data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	inode = folio->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != folio_size(folio)) {
		dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
		     inode, folio->index, offset, length);
		return;
	}

	WARN_ON(!folio_test_locked(folio));
	if (folio_get_private(folio)) {
		dout("%p invalidate_folio idx %lu full dirty page\n",
		     inode, folio->index);

		snapc = folio_detach_private(folio);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
	}

	folio_wait_fscache(folio);
}

static int ceph_releasepage(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;

	dout("%llx:%llx releasepage %p idx %lu (%sdirty)\n",
	     ceph_vinop(inode), page,
	     page->index, PageDirty(page) ? "" : "not ");

	if (PagePrivate(page))
		return 0;

	if (PageFsCache(page)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return 0;
		wait_on_page_fscache(page);
	}
	ceph_fscache_note_page_release(inode);
	return 1;
}

static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
{
	struct inode *inode = rreq->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_layout *lo = &ci->i_layout;
	u32 blockoff;
	u64 blockno;

	/* Expand the start downward */
	blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
	rreq->start = blockno * lo->stripe_unit;
	rreq->len += blockoff;

	/* Now, round up the length to the next block */
	rreq->len = roundup(rreq->len, lo->stripe_unit);
}

static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
{
	struct inode *inode = subreq->rreq->mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 objno, objoff;
	u32 xlen;

	/* Truncate the extent at the end of the current block */
	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
				      &objno, &objoff, &xlen);
	subreq->len = min(xlen, fsc->mount_options->rsize);
	return true;
}

static void finish_netfs_read(struct ceph_osd_request *req)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct netfs_read_subrequest *subreq = req->r_priv;
	int num_pages;
	int err = req->r_result;

	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, osd_data->length, err);

	dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
	     subreq->len, i_size_read(req->r_inode));

	/* no object means success but no data */
	if (err == -ENOENT)
		err = 0;
	else if (err == -EBLOCKLISTED)
		fsc->blocklisted = true;

	if (err >= 0 && err < subreq->len)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err, true);

	num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
	ceph_put_page_vector(osd_data->pages, num_pages, false);
	iput(req->r_inode);
}

static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct inode *inode = rreq->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct ceph_vino vino = ceph_vino(inode);
	struct iov_iter iter;
	struct page **pages;
	size_t page_off;
	int err = 0;
	u64 len = subreq->len;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
				    NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
	err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
	if (err < 0) {
		dout("%s: iov_iter_get_pages_alloc returned %d\n", __func__, err);
		goto out;
	}

	/* should always give us a page-aligned read */
	WARN_ON_ONCE(page_off);
	len = err;

	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	err = ceph_osdc_start_request(req->r_osdc, req, false);
	if (err)
		iput(inode);
out:
	ceph_osdc_put_request(req);
	if (err)
		netfs_subreq_terminated(subreq, err, false);
	dout("%s: result %d\n", __func__, err);
}

static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int got = (uintptr_t)priv;

	if (got)
		ceph_put_cap_refs(ci, got);
}

static const struct netfs_read_request_ops ceph_netfs_read_ops = {
	.is_cache_enabled	= ceph_is_cache_enabled,
	.begin_cache_operation	= ceph_begin_cache_operation,
	.issue_op		= ceph_netfs_issue_op,
	.expand_readahead	= ceph_netfs_expand_readahead,
	.clamp_length		= ceph_netfs_clamp_length,
	.check_write_begin	= ceph_netfs_check_write_begin,
	.cleanup		= ceph_readahead_cleanup,
};
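/*
 * Worked example for the expand/clamp hooks above (hypothetical layout
 * with a 64 KiB stripe_unit): a read at start = 100 KiB, len = 8 KiB
 * expands to start = 64 KiB, len = 44 KiB (blockoff = 36 KiB), then
 * rounds up to len = 64 KiB -- exactly the stripe unit containing the
 * original range.  clamp_length then truncates each subrequest at the
 * object boundary (and at rsize), so every OSD read targets one object.
 */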
/* read a single page, without unlocking it. */
static int ceph_readpage(struct file *file, struct page *subpage)
{
	struct folio *folio = page_folio(subpage);
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vino vino = ceph_vino(inode);
	size_t len = folio_size(folio);
	u64 off = folio_file_pos(folio);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0) {
			folio_unlock(folio);
			return -EINVAL;
		}
		zero_user_segment(&folio->page, 0, folio_size(folio));
		folio_mark_uptodate(folio);
		folio_unlock(folio);
		return 0;
	}

	dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n",
	     vino.ino, vino.snap, file, off, len, folio, folio_index(folio));

	return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
}

static void ceph_readahead(struct readahead_control *ractl)
{
	struct inode *inode = file_inode(ractl->file);
	struct ceph_file_info *fi = ractl->file->private_data;
	struct ceph_rw_context *rw_ctx;
	int got = 0;
	int ret = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return;

	rw_ctx = ceph_find_rw_context(fi);
	if (!rw_ctx) {
		/*
		 * readahead callers do not necessarily hold Fcb caps
		 * (e.g. fadvise, madvise).
		 */
		int want = CEPH_CAP_FILE_CACHE;

		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
		if (ret < 0)
			dout("start_read %p, error getting cap\n", inode);
		else if (!(got & want))
			dout("start_read %p, no cache cap\n", inode);

		if (ret <= 0)
			return;
	}
	netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
}
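/*
 * Note on the (void *)(uintptr_t)got handoff above: when readahead was
 * not entered through a normal read context (e.g. fadvise/madvise), the
 * Fc cap reference taken here travels as the opaque netfs private
 * pointer and is dropped again by ceph_readahead_cleanup() once the
 * read request completes.
 */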
#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
	set_page_fscache(page);
}

static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
	struct inode *inode = priv;

	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
		ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
			       ceph_fscache_write_terminated, inode, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}

static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
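/*
 * Illustration (hypothetical state): with two capsnaps whose contexts
 * have seq 5 and seq 8, both with dirty pages, plus dirty head pages, a
 * caller passing page_snapc == NULL gets the seq-5 context back -- the
 * oldest one with dirty pages -- since writeback must proceed in snap
 * order; the head context is only returned once no capsnap has dirty
 * pages left.
 */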
static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + thp_size(page))
		end = page_offset(page) + thp_size(page);
	return end > start ? end - start : 0;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = thp_size(page);
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	bool caching = ceph_is_cache_enabled(inode);

	dout("writepage %p idx %lu\n", page, page->index);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("folio at %lu beyond eof %llu\n", folio->index,
		     ceph_wbc.i_size);
		folio_invalidate(folio, 0, folio_size(folio));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = true;

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	set_page_writeback(page);
	if (caching)
		ceph_set_page_fscache(page);
	ceph_fscache_write_to_cache(inode, page_off, len, caching);

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > thp_size(page));
	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
	dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(osdc, req, true);
	if (!err)
		err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	oldest = detach_page_private(page);
	WARN_ON_ONCE(oldest != snapc);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = false;

	return err;
}
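/*
 * Example of the EOF clamping in writepage_nounlock() above
 * (hypothetical numbers, 4 KiB pages): for a page at offset 8192 on a
 * file whose size for this snap context is 10000 bytes, len is trimmed
 * from 4096 to 10000 - 8192 = 1808, so only bytes that exist in this
 * context are written; a page lying wholly beyond EOF is invalidated
 * instead of written.
 */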
static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    ceph_inode_to_client(inode)->write_congested)
		return AOP_WRITEPAGE_ACTIVATE;

	wait_on_page_fscache(page);

	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	unsigned int len = 0;
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		len += osd_data->length;
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				fsc->write_congested = false;

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);
			dout("unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;
	bool caching = ceph_is_cache_enabled(inode);

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fsc->write_congested)
		return 0;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (ceph_inode_is_shutdown(inode)) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with a newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' get written. */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						      end, PAGECACHE_TAG_DIRTY);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				struct folio *folio = page_folio(page);

				dout("folio at %lu beyond eof %llu\n",
				     folio->index, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    folio_pos(folio) >= i_size_read(inode)) &&
				    folio_clear_dirty_for_io(folio))
					folio_invalidate(folio, 0,
							folio_size(folio));
				folio_unlock(folio);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page) || PageFsCache(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
				wait_on_page_fscache(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
						            CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb))
				fsc->write_congested = true;

			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += thp_size(page);
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
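		/*
		 * Pages consumed into pages[] were nulled out of the pvec
		 * above, so the compaction below shifts the survivors to
		 * the front and pagevec_release() only drops references
		 * we still hold; if the pvec ran out before the batch
		 * filled up, we go look for more dirty pages.
		 */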
		if (i) {
			unsigned j, n = 0;
			/* shift unused page to beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0, num_ops,
					    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					    snapc, ceph_wbc.truncate_seq,
					    ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						    &ci->i_layout, vino,
						    offset, &len, 0,
						    min(num_ops,
							CEPH_OSD_SLAB_OPS),
						    CEPH_OSD_OP_WRITE,
						    CEPH_OSD_FLAG_WRITE,
						    snapc, ceph_wbc.truncate_seq,
						    ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     thp_size(page) - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			/*
			 * Discontinuity in page range? Ceph can handle that by just passing
			 * multiple extents in the write op.
			 */
			if (offset + len != cur_offset) {
				/* If it's full, stop here */
				if (op_idx + 1 == req->r_num_ops)
					break;

				/* Kick off an fscache write with what we have so far. */
				ceph_fscache_write_to_cache(inode, offset, len, caching);

				/* Start a new extent */
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
								 data_pages, len, 0,
								 from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			if (caching)
				ceph_set_page_fscache(pages[i]);
			len += thp_size(page);
		}
		ceph_fscache_write_to_cache(inode, offset, len, caching);

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - thp_size(page);
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
							PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}
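/*
 * End-to-end sketch of one ceph_writepages_start() pass (hypothetical:
 * wsize = 4 MiB, a 4 MiB strip unit, 4 KiB pages): the gathering loop
 * locks up to 1024 dirty pages that share the oldest snapc and fall
 * inside a single strip unit, then issues them as one OSD request --
 * one WRITE op per physically contiguous page run, splitting into a
 * further request if the op limit is hit -- and loops until
 * nr_to_write is consumed or the range is clean.
 */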
/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context. Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (ceph_inode_is_shutdown(inode)) {
		dout(" page %p %llx:%llx is shutdown\n", page,
		     ceph_vinop(inode));
		return ERR_PTR(-ESTALE);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		wait_on_page_writeback(page);

		snapc = page_snap_context(page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n", page, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n", page, snapc);
		if (clear_page_dirty_for_io(page)) {
			int r = writepage_nounlock(page, NULL);
			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio *folio, void **_fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	snapc = ceph_find_incompatible(folio_page(folio, 0));
	if (snapc) {
		int r;

		folio_unlock(folio);
		folio_put(folio);
		if (IS_ERR(snapc))
			return PTR_ERR(snapc);

		ceph_queue_writeback(inode);
		r = wait_event_killable(ci->i_cap_wq,
					context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
		return r == 0 ? -EAGAIN : r;
	}
	return 0;
}
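/*
 * Contract sketch for the hook above: returning -EAGAIN after the wait
 * asks the netfs layer to restart the write_begin sequence from
 * scratch, so by the time a page is finally handed back to the caller
 * it is guaranteed not to be dirty in an older, not-yet-written snap
 * context.
 */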
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned aop_flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct folio *folio = NULL;
	pgoff_t index = pos >> PAGE_SHIFT;
	int r;

	/*
	 * Uninlining should have already been done and everything updated, EXCEPT
	 * for inline_version sent to the MDS.
	 */
	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
		if (aop_flags & AOP_FLAG_NOFS)
			fgp_flags |= FGP_NOFS;
		folio = __filemap_get_folio(mapping, index, fgp_flags,
					    mapping_gfp_mask(mapping));
		if (!folio)
			return -ENOMEM;

		/*
		 * The inline_version on a new inode is set to 1. If that's the
		 * case, then the folio is brand new and isn't yet Uptodate.
		 */
		r = 0;
		if (index == 0 && ci->i_inline_version != 1) {
			if (!folio_test_uptodate(folio)) {
				WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n",
					  ci->i_inline_version);
				r = -EINVAL;
			}
			goto out;
		}
		zero_user_segment(&folio->page, 0, folio_size(folio));
		folio_mark_uptodate(folio);
		goto out;
	}

	r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL,
			      &ceph_netfs_read_ops, NULL);
out:
	if (r == 0)
		folio_wait_fscache(folio);
	if (r < 0) {
		if (folio)
			folio_put(folio);
	} else {
		WARN_ON_ONCE(!folio_test_locked(folio));
		*pagep = &folio->page;
	}
	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
	     inode, folio, (int)pos, (int)copied, (int)len);

	if (!folio_test_uptodate(folio)) {
		/* just return that nothing was copied on a short copy */
		if (copied < len) {
			copied = 0;
			goto out;
		}
		folio_mark_uptodate(folio);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	folio_mark_dirty(folio);

out:
	folio_unlock(folio);
	folio_put(folio);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readahead = ceph_readahead,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.dirty_folio = ceph_dirty_folio,
	.invalidate_folio = ceph_invalidate_folio,
	.releasepage = ceph_releasepage,
	.direct_IO = noop_direct_IO,
};

static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
	     inode, ceph_vinop(inode), off);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
	if (err < 0)
		goto out_restore;
	dout("filemap_fault %p %llu got cap refs on %s\n",
	     inode, off, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
		     inode, off, ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;

		filemap_invalidate_lock_shared(mapping);
		page = find_or_create_page(mapping, 0,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		filemap_invalidate_unlock_shared(mapping);
		dout("filemap_fault %p %llu read inline data ret %x\n",
		     inode, off, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}
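/*
 * ceph_page_mkwrite() below loops for the same reason the write path
 * does: if the faulting page is dirty in an older snap context, it
 * kicks writeback, drops the page lock, and waits until that context
 * has been written out before letting the store proceed against the
 * head context.
 */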
static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		err = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (err < 0)
			goto out_free;
	}

	if (off + thp_size(page) <= size)
		len = thp_size(page);
	else
		len = offset_in_thp(page, size);

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
	if (err < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (!snapc) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
			break;
		}

		unlock_page(page);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs_async(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}
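/*
 * Uninlining sketch: ceph_uninline_data() below migrates inline data
 * out to the first RADOS object in two steps -- a CEPH_OSD_OP_CREATE
 * of the object, then a compound request whose leading CMPXATTR op
 * ("inline_version", greater-than) turns the WRITE and SETXATTR ops
 * into a no-op (-ECANCELED, treated as success) if a newer uninline
 * raced ahead of us.
 */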
int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;

	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				put_page(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}

static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
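	/*
	 * Interpreting the probe results below: the read STAT succeeding
	 * (or failing with -ENOENT, the object merely being absent)
	 * proves read access; the exclusive CREATE succeeding (or
	 * -EEXIST, the object already existing) proves write access;
	 * -EPERM from either means the file's layout points at a pool
	 * this client's OSD caps do not cover.
	 */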
	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLOCKLISTED)
			fsc->blocklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}

int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	/* Only need to do this for regular files */
	if (!S_ISREG(inode->i_mode))
		return 0;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have already
		 * been deleted. Skip check to avoid creating orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}