// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
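
/*
 * A rough worked example of the accounting above: dirtying three pages
 * with no snapshot outstanding leaves i_wrbuffer_ref ==
 * i_wrbuffer_ref_head == 3.  If a snapshot is then taken, the head
 * count moves to the new capsnap (capsnap->dirty = 3,
 * i_wrbuffer_ref_head = 0), while i_wrbuffer_ref stays at 3 until
 * those pages have been written back under the old snap context.
 */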

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
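
/*
 * For illustration: with 4 KB pages (PAGE_SHIFT == 12) and, say,
 * congestion_kb = 8192, writeback is marked congested once more than
 * 8192 >> 2 == 2048 pages are in flight, and uncongested again when the
 * count drops below 2048 - (2048 >> 2) == 1536 pages; the off threshold
 * backs off by a quarter to avoid flapping.
 */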

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (folio_test_dirty(folio)) {
		doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
		      ceph_vinop(inode), folio, folio->index);
		VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
		return false;
	}

	atomic64_inc(&mdsc->dirty_folios);

	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
	      "snapc %p seq %lld (%d snaps)\n",
	      ceph_vinop(inode), folio, folio->index,
	      ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	      ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	      snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in folio->private.  Also set
	 * PagePrivate so that we get invalidate_folio callback.
	 */
	VM_WARN_ON_FOLIO(folio->private, folio);
	folio_attach_private(folio, snapc);

	return ceph_fscache_dirty_folio(mapping, folio);
}

/*
 * If we are truncating the full folio (i.e. offset == 0), adjust the
 * dirty folio counters appropriately.  Only called if there is private
 * data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	if (offset != 0 || length != folio_size(folio)) {
		doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
		      ceph_vinop(inode), folio->index, offset, length);
		return;
	}

	WARN_ON(!folio_test_locked(folio));
	if (folio_test_private(folio)) {
		doutc(cl, "%llx.%llx idx %lu full dirty page\n",
		      ceph_vinop(inode), folio->index);

		snapc = folio_detach_private(folio);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
	}

	netfs_invalidate_folio(folio, offset, length);
}

static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
{
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_layout *lo = &ci->i_layout;
	unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
	loff_t end = rreq->start + rreq->len, new_end;
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;
	unsigned long max_len;
	u32 blockoff;

	if (priv) {
		/* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
		if (priv->file_ra_disabled)
			max_pages = 0;
		else
			max_pages = priv->file_ra_pages;

	}

	/* Readahead is disabled */
	if (!max_pages)
		return;

	max_len = max_pages << PAGE_SHIFT;

	/*
	 * Try to expand the length forward by rounding it up to the next
	 * block, but do not exceed the file size, unless the original
	 * request already exceeds it.
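	 *
	 * E.g. (illustrative numbers): with a 4 MB stripe_unit, a request
	 * for 512 KB at offset 3.5 MB has end == 4 MB already aligned, so
	 * nothing is added; a request ending at 3.5 MB would be extended
	 * to 4 MB, provided that stays within i_size and max_len.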
	 */
	new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
	if (new_end > end && new_end <= rreq->start + max_len)
		rreq->len = new_end - rreq->start;

	/* Try to expand the start downward */
	div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
	if (rreq->len + blockoff <= max_len) {
		rreq->start -= blockoff;
		rreq->len += blockoff;
	}
}

static void finish_netfs_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct netfs_io_subrequest *subreq = req->r_priv;
	struct ceph_osd_req_op *op = &req->r_ops[0];
	int err = req->r_result;
	bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);

	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, osd_data->length, err);

	doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
	      subreq->len, i_size_read(req->r_inode));

	/* no object means success but no data */
	if (err == -ENOENT) {
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		err = 0;
	} else if (err == -EBLOCKLISTED) {
		fsc->blocklisted = true;
	}

	if (err >= 0) {
		if (sparse && err > 0)
			err = ceph_sparse_ext_map_end(op);
		if (err < subreq->len &&
		    subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
		    subreq->rreq->origin != NETFS_DIO_READ)
			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		if (IS_ENCRYPTED(inode) && err > 0) {
			err = ceph_fscrypt_decrypt_extents(inode,
					osd_data->pages, subreq->start,
					op->extent.sparse_ext,
					op->extent.sparse_ext_cnt);
			if (err > subreq->len)
				err = subreq->len;
		}
		if (err > 0)
			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	}

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		ceph_put_page_vector(osd_data->pages,
				     calc_pages_for(osd_data->alignment,
						    osd_data->length), false);
	}
	if (err > 0) {
		subreq->transferred = err;
		err = 0;
	}
	subreq->error = err;
	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
	netfs_read_subreq_terminated(subreq);
	iput(req->r_inode);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_mds_reply_info_parsed *rinfo;
	struct ceph_mds_reply_info_in *iinfo;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	ssize_t err = 0;
	size_t len;
	int mode;

	if (rreq->origin != NETFS_UNBUFFERED_READ &&
	    rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);

	if (subreq->start >= inode->i_size)
		goto out;

	/* We need to fetch the inline data.
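	 * Inline data lives in the inode on the MDS rather than in RADOS
	 * objects, so it is retrieved with a GETATTR request (asking for
	 * CEPH_STAT_CAP_INLINE_DATA) and copied into the subrequest below.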
	 */
	mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_ino1 = ci->i_vino;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
	req->r_num_caps = 2;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err < 0)
		goto out;

	rinfo = &req->r_reply_info;
	iinfo = &rinfo->targeti;
	if (iinfo->inline_version == CEPH_INLINE_NONE) {
		/* The data got uninlined */
		ceph_mdsc_put_request(req);
		return false;
	}

	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &subreq->io_iter);
	if (err == 0) {
		err = -EFAULT;
	} else {
		subreq->transferred += err;
		err = 0;
	}

	ceph_mdsc_put_request(req);
out:
	subreq->error = err;
	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
	netfs_read_subreq_terminated(subreq);
	return true;
}

static int ceph_netfs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	u64 objno, objoff;
	u32 xlen;

	/* Truncate the extent at the end of the current block */
	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
				      &objno, &objoff, &xlen);
	rreq->io_streams[0].sreq_max_len = umin(xlen, fsc->mount_options->rsize);
	return 0;
}

static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_vino vino = ceph_vino(inode);
	int err;
	u64 len;
	bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
	u64 off = subreq->start;
	int extent_cnt;

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
		return;

	// TODO: This rounding here is slightly dodgy. It *should* work, for
	// now, as the cache only deals in blocks that are a multiple of
	// PAGE_SIZE and fscrypt blocks are at most PAGE_SIZE. What needs to
	// happen is for the fscrypt driving to be moved into netfslib and the
	// data in the cache also to be stored encrypted.
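	/*
	 * (Roughly: for an encrypted inode the offset is rounded down and
	 * the length rounded up to CEPH_FSCRYPT_BLOCK_SIZE boundaries, so
	 * whole fscrypt blocks are always read and can be decrypted.)
	 */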
	len = subreq->len;
	ceph_fscrypt_adjust_off_and_len(inode, &off, &len);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
				    off, &len, 0, 1,
				    sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	if (sparse) {
		extent_cnt = __ceph_sparse_read_ext_count(inode, len);
		err = ceph_alloc_sparse_ext_map(&req->r_ops[0], extent_cnt);
		if (err)
			goto out;
	}

	doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
	      ceph_vinop(inode), subreq->start, subreq->len, len);

	/*
	 * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
	 * encrypted inodes. We'd need infrastructure that handles an iov_iter
	 * instead of page arrays, and we don't have that as of yet. Once the
	 * dust settles on the write helpers and encrypt/decrypt routines for
	 * netfs, we should be able to rework this.
	 */
	if (IS_ENCRYPTED(inode)) {
		struct page **pages;
		size_t page_off;

		err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
		if (err < 0) {
			doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
			      ceph_vinop(inode), err);
			goto out;
		}

		/* should always give us a page-aligned read */
		WARN_ON_ONCE(page_off);
		len = err;
		err = 0;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
						 false);
	} else {
		osd_req_op_extent_osd_iter(req, 0, &subreq->io_iter);
	}
	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
		err = -EIO;
		goto out;
	}
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	ceph_osdc_start_request(req->r_osdc, req);
out:
	ceph_osdc_put_request(req);
	if (err) {
		subreq->error = err;
		netfs_read_subreq_terminated(subreq);
	}
	doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}

static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct inode *inode = rreq->inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	int got = 0, want = CEPH_CAP_FILE_CACHE;
	struct ceph_netfs_request_data *priv;
	int ret = 0;

	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);

	if (rreq->origin != NETFS_READAHEAD)
		return 0;

	priv = kzalloc(sizeof(*priv), GFP_NOFS);
	if (!priv)
		return -ENOMEM;

	if (file) {
		struct ceph_rw_context *rw_ctx;
		struct ceph_file_info *fi = file->private_data;

		priv->file_ra_pages = file->f_ra.ra_pages;
		priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;

		rw_ctx = ceph_find_rw_context(fi);
		if (rw_ctx) {
			rreq->netfs_priv = priv;
			return 0;
		}
	}

	/*
	 * readahead callers do not necessarily hold Fcb caps
	 * (e.g. fadvise, madvise).
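	 * Take Fr/Fc references here ourselves (non-blocking, via
	 * ceph_try_get_caps()); they are stashed in priv->caps and dropped
	 * again in ceph_netfs_free_request().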
	 */
	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
	if (ret < 0) {
		doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
		goto out;
	}

	if (!(got & want)) {
		doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
		ret = -EACCES;
		goto out;
	}
	if (ret == 0) {
		ret = -EACCES;
		goto out;
	}

	priv->caps = got;
	rreq->netfs_priv = priv;
	rreq->io_streams[0].sreq_max_len = fsc->mount_options->rsize;

out:
	if (ret < 0) {
		if (got)
			ceph_put_cap_refs(ceph_inode(inode), got);
		kfree(priv);
	}

	return ret;
}

static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;

	if (!priv)
		return;

	if (priv->caps)
		ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
	kfree(priv);
	rreq->netfs_priv = NULL;
}

const struct netfs_request_ops ceph_netfs_ops = {
	.init_request		= ceph_init_request,
	.free_request		= ceph_netfs_free_request,
	.prepare_read		= ceph_netfs_prepare_read,
	.issue_read		= ceph_netfs_issue_read,
	.expand_readahead	= ceph_netfs_expand_readahead,
	.check_write_begin	= ceph_netfs_check_write_begin,
};

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
	folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
}

static void ceph_fscache_write_terminated(void *priv, ssize_t error)
{
	struct inode *inode = priv;

	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
		ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
			       ceph_fscache_write_terminated, inode, true, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}

static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;

	bool head_snapc;
	struct ceph_snap_context *snapc;
	struct ceph_snap_context *last_snapc;

	bool done;
	bool should_loop;
	bool range_whole;
	pgoff_t start_index;
	pgoff_t index;
	pgoff_t end;
	xa_mark_t tag;

	pgoff_t strip_unit_end;
	unsigned int wsize;
	unsigned int nr_folios;
	unsigned int max_pages;
	unsigned int locked_pages;

	int op_idx;
	int num_ops;
	u64 offset;
	u64 len;

	struct folio_batch fbatch;
	unsigned int processed_in_fbatch;

	bool from_pool;
	struct page **pages;
	struct page **data_pages;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
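 *
 * (Concretely: the context of the first capsnap on i_cap_snaps that
 * still has dirty pages, or i_head_snapc when only "head" data is
 * dirty.  If @ctl is given, it is also filled with the matching
 * size/truncate state.)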
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
		      capsnap, capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
		      ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);
	u64 ret;

	snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
		end = ceph_fscrypt_page_offset(page) + thp_size(page);
	ret = end > start ? end - start : 0;
	if (ret && fscrypt_is_bounce_page(page))
		ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
	return ret;
}

/*
 * Write a folio, but leave it locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., folio is no longer dirty).
 */
static int write_folio_nounlock(struct folio *folio,
				struct writeback_control *wbc)
{
	struct page *page = &folio->page;
	struct inode *inode = folio->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = folio_pos(folio);
	int err;
	loff_t len = folio_size(folio);
	loff_t wlen;
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	bool caching = ceph_is_cache_enabled(inode);
	struct page *bounce_page = NULL;

	doutc(cl, "%llx.%llx folio %p idx %lu\n", ceph_vinop(inode), folio,
	      folio->index);

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(&folio->page);
	if (!snapc) {
		doutc(cl, "%llx.%llx folio %p not dirty?\n", ceph_vinop(inode),
		      folio);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		doutc(cl, "%llx.%llx folio %p snapc %p not writeable - noop\n",
		      ceph_vinop(inode), folio, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		folio_redirty_for_writepage(wbc, folio);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
		      ceph_vinop(inode), folio->index, ceph_wbc.i_size);
		folio_invalidate(folio, 0, folio_size(folio));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
	doutc(cl, "%llx.%llx folio %p index %lu on %llu~%llu snapc %p seq %lld\n",
	      ceph_vinop(inode), folio, folio->index, page_off, wlen, snapc,
	      snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = true;

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
				    page_off, &wlen, 0, 1, CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq,
				    ceph_wbc.truncate_size, true);
	if (IS_ERR(req)) {
		folio_redirty_for_writepage(wbc, folio);
		return PTR_ERR(req);
	}

	if (wlen < len)
		len = wlen;

	folio_start_writeback(folio);
	if (caching)
		ceph_set_page_fscache(&folio->page);
	ceph_fscache_write_to_cache(inode, page_off, len, caching);

	if (IS_ENCRYPTED(inode)) {
		bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
						CEPH_FSCRYPT_BLOCK_SIZE, 0,
						GFP_NOFS);
		if (IS_ERR(bounce_page)) {
			folio_redirty_for_writepage(wbc, folio);
			folio_end_writeback(folio);
			ceph_osdc_put_request(req);
			return PTR_ERR(bounce_page);
		}
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > folio_size(folio));
	osd_req_op_extent_osd_data_pages(req, 0,
			bounce_page ? &bounce_page : &page, wlen, 0,
			false, false);
	doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
	      ceph_vinop(inode), page_off, len, wlen,
	      IS_ENCRYPTED(inode) ? "" : "not ");
"" : "not "); 810 811 req->r_mtime = inode_get_mtime(inode); 812 ceph_osdc_start_request(osdc, req); 813 err = ceph_osdc_wait_request(osdc, req); 814 815 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 816 req->r_end_latency, len, err); 817 fscrypt_free_bounce_page(bounce_page); 818 ceph_osdc_put_request(req); 819 if (err == 0) 820 err = len; 821 822 if (err < 0) { 823 struct writeback_control tmp_wbc; 824 if (!wbc) 825 wbc = &tmp_wbc; 826 if (err == -ERESTARTSYS) { 827 /* killed by SIGKILL */ 828 doutc(cl, "%llx.%llx interrupted page %p\n", 829 ceph_vinop(inode), folio); 830 folio_redirty_for_writepage(wbc, folio); 831 folio_end_writeback(folio); 832 return err; 833 } 834 if (err == -EBLOCKLISTED) 835 fsc->blocklisted = true; 836 doutc(cl, "%llx.%llx setting mapping error %d %p\n", 837 ceph_vinop(inode), err, folio); 838 mapping_set_error(&inode->i_data, err); 839 wbc->pages_skipped++; 840 } else { 841 doutc(cl, "%llx.%llx cleaned page %p\n", 842 ceph_vinop(inode), folio); 843 err = 0; /* vfs expects us to return 0 */ 844 } 845 oldest = folio_detach_private(folio); 846 WARN_ON_ONCE(oldest != snapc); 847 folio_end_writeback(folio); 848 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 849 ceph_put_snap_context(snapc); /* page's reference */ 850 851 if (atomic_long_dec_return(&fsc->writeback_count) < 852 CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) 853 fsc->write_congested = false; 854 855 return err; 856 } 857 858 /* 859 * async writeback completion handler. 860 * 861 * If we get an error, set the mapping error bit, but not the individual 862 * page error bits. 863 */ 864 static void writepages_finish(struct ceph_osd_request *req) 865 { 866 struct inode *inode = req->r_inode; 867 struct ceph_inode_info *ci = ceph_inode(inode); 868 struct ceph_client *cl = ceph_inode_to_client(inode); 869 struct ceph_osd_data *osd_data; 870 struct page *page; 871 int num_pages, total_pages = 0; 872 int i, j; 873 int rc = req->r_result; 874 struct ceph_snap_context *snapc = req->r_snapc; 875 struct address_space *mapping = inode->i_mapping; 876 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 877 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 878 unsigned int len = 0; 879 bool remove_page; 880 881 doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc); 882 if (rc < 0) { 883 mapping_set_error(mapping, rc); 884 ceph_set_error_write(ci); 885 if (rc == -EBLOCKLISTED) 886 fsc->blocklisted = true; 887 } else { 888 ceph_clear_error_write(ci); 889 } 890 891 /* 892 * We lost the cache cap, need to truncate the page before 893 * it is unlocked, otherwise we'd truncate it later in the 894 * page truncation thread, possibly losing some data that 895 * raced its way in 896 */ 897 remove_page = !(ceph_caps_issued(ci) & 898 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); 899 900 /* clean all pages */ 901 for (i = 0; i < req->r_num_ops; i++) { 902 if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) { 903 pr_warn_client(cl, 904 "%llx.%llx incorrect op %d req %p index %d tid %llu\n", 905 ceph_vinop(inode), req->r_ops[i].op, req, i, 906 req->r_tid); 907 break; 908 } 909 910 osd_data = osd_req_op_extent_osd_data(req, i); 911 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 912 len += osd_data->length; 913 num_pages = calc_pages_for((u64)osd_data->alignment, 914 (u64)osd_data->length); 915 total_pages += num_pages; 916 for (j = 0; j < num_pages; j++) { 917 page = osd_data->pages[j]; 918 if (fscrypt_is_bounce_page(page)) { 919 page = fscrypt_pagecache_page(page); 920 
				fscrypt_free_bounce_page(osd_data->pages[j]);
				osd_data->pages[j] = page;
			}
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			    CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				fsc->write_congested = false;

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);

			if (atomic64_dec_return(&mdsc->dirty_folios) <= 0) {
				wake_up_all(&mdsc->flush_end_wq);
				WARN_ON(atomic64_read(&mdsc->dirty_folios) < 0);
			}

			doutc(cl, "unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_folio(inode->i_mapping,
							   page_folio(page));

			unlock_page(page);
		}
		doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
		      ceph_vinop(inode), osd_data->length,
		      rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static inline
bool is_forced_umount(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;

	if (ceph_inode_is_shutdown(inode)) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited_client(cl,
				"%llx.%llx %lld forced umount\n",
				ceph_vinop(inode), ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return true;
	}

	return false;
}

static inline
unsigned int ceph_define_write_size(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	unsigned int wsize = i_blocksize(inode);

	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	return wsize;
}

static inline
void ceph_folio_batch_init(struct ceph_writeback_ctl *ceph_wbc)
{
	folio_batch_init(&ceph_wbc->fbatch);
	ceph_wbc->processed_in_fbatch = 0;
}

static inline
void ceph_folio_batch_reinit(struct ceph_writeback_ctl *ceph_wbc)
{
	folio_batch_release(&ceph_wbc->fbatch);
	ceph_folio_batch_init(ceph_wbc);
}

static inline
void ceph_init_writeback_ctl(struct address_space *mapping,
			     struct writeback_control *wbc,
			     struct ceph_writeback_ctl *ceph_wbc)
{
	ceph_wbc->snapc = NULL;
	ceph_wbc->last_snapc = NULL;

	ceph_wbc->strip_unit_end = 0;
	ceph_wbc->wsize = ceph_define_write_size(mapping);

	ceph_wbc->nr_folios = 0;
	ceph_wbc->max_pages = 0;
	ceph_wbc->locked_pages = 0;

	ceph_wbc->done = false;
	ceph_wbc->should_loop = false;
	ceph_wbc->range_whole = false;

	ceph_wbc->start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	ceph_wbc->index = ceph_wbc->start_index;
	ceph_wbc->end = -1;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
		ceph_wbc->tag = PAGECACHE_TAG_TOWRITE;
	} else {
		ceph_wbc->tag = PAGECACHE_TAG_DIRTY;
	}

	ceph_wbc->op_idx = -1;
	ceph_wbc->num_ops = 0;
	ceph_wbc->offset = 0;
	ceph_wbc->len = 0;
	ceph_wbc->from_pool = false;

	ceph_folio_batch_init(ceph_wbc);

	ceph_wbc->pages = NULL;
	ceph_wbc->data_pages = NULL;
}

static inline
int ceph_define_writeback_range(struct address_space *mapping,
				struct writeback_control *wbc,
				struct ceph_writeback_ctl *ceph_wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;

	/* find oldest snap context with dirty data */
	ceph_wbc->snapc = get_oldest_context(inode, ceph_wbc, NULL);
	if (!ceph_wbc->snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		doutc(cl, " no snap context with dirty data?\n");
		return -ENODATA;
	}

	doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n",
	      ceph_wbc->snapc, ceph_wbc->snapc->seq,
	      ceph_wbc->snapc->num_snaps);

	ceph_wbc->should_loop = false;

	if (ceph_wbc->head_snapc && ceph_wbc->snapc != ceph_wbc->last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			ceph_wbc->index = ceph_wbc->start_index;
			ceph_wbc->end = -1;
			if (ceph_wbc->index > 0)
				ceph_wbc->should_loop = true;
			doutc(cl, " cyclic, start at %lu\n", ceph_wbc->index);
		} else {
			ceph_wbc->index = wbc->range_start >> PAGE_SHIFT;
			ceph_wbc->end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				ceph_wbc->range_whole = true;
			doutc(cl, " not cyclic, %lu to %lu\n",
			      ceph_wbc->index, ceph_wbc->end);
		}
	} else if (!ceph_wbc->head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' have been written. */
		if (ceph_wbc->index > 0)
			ceph_wbc->should_loop = true;
		doutc(cl, " non-head snapc, range whole\n");
	}

	ceph_put_snap_context(ceph_wbc->last_snapc);
	ceph_wbc->last_snapc = ceph_wbc->snapc;

	return 0;
}

static inline
bool has_writeback_done(struct ceph_writeback_ctl *ceph_wbc)
{
	return ceph_wbc->done && ceph_wbc->index > ceph_wbc->end;
}

static inline
bool can_next_page_be_processed(struct ceph_writeback_ctl *ceph_wbc,
				unsigned index)
{
	return index < ceph_wbc->nr_folios &&
	       ceph_wbc->locked_pages < ceph_wbc->max_pages;
}

static
int ceph_check_page_before_write(struct address_space *mapping,
				 struct writeback_control *wbc,
				 struct ceph_writeback_ctl *ceph_wbc,
				 struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_snap_context *pgsnapc;

	/* only dirty folios, or our accounting breaks */
	if (unlikely(!folio_test_dirty(folio) || folio->mapping != mapping)) {
		doutc(cl, "!dirty or !mapping %p\n", folio);
		return -ENODATA;
	}

	/* only if matching snap context */
	pgsnapc = page_snap_context(&folio->page);
	if (pgsnapc != ceph_wbc->snapc) {
		doutc(cl, "folio snapc %p %lld != oldest %p %lld\n",
		      pgsnapc, pgsnapc->seq,
		      ceph_wbc->snapc, ceph_wbc->snapc->seq);

		if (!ceph_wbc->should_loop && !ceph_wbc->head_snapc &&
		    wbc->sync_mode != WB_SYNC_NONE)
			ceph_wbc->should_loop = true;

		return -ENODATA;
	}

	if (folio_pos(folio) >= ceph_wbc->i_size) {
		doutc(cl, "folio at %lu beyond eof %llu\n",
		      folio->index, ceph_wbc->i_size);

		if ((ceph_wbc->size_stable ||
		     folio_pos(folio) >= i_size_read(inode)) &&
		    folio_clear_dirty_for_io(folio))
			folio_invalidate(folio, 0, folio_size(folio));

		return -ENODATA;
	}

	if (ceph_wbc->strip_unit_end &&
	    (folio->index > ceph_wbc->strip_unit_end)) {
		doutc(cl, "end of strip unit %p\n", folio);
		return -E2BIG;
	}

	return 0;
}

static inline
void __ceph_allocate_page_array(struct ceph_writeback_ctl *ceph_wbc,
				unsigned int max_pages)
{
	ceph_wbc->pages = kmalloc_array(max_pages,
					sizeof(*ceph_wbc->pages),
					GFP_NOFS);
	if (!ceph_wbc->pages) {
		ceph_wbc->from_pool = true;
		ceph_wbc->pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
		BUG_ON(!ceph_wbc->pages);
	}
}

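/*
 * Note: falling back to the mempool rather than failing keeps writeback
 * able to make forward progress under memory pressure; a pool-backed
 * request is correspondingly capped at CEPH_OSD_SLAB_OPS ops by
 * is_num_ops_too_big() below.
 */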
static inline
void ceph_allocate_page_array(struct address_space *mapping,
			      struct ceph_writeback_ctl *ceph_wbc,
			      struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 objnum;
	u64 objoff;
	u32 xlen;

	/* prepare async write request */
	ceph_wbc->offset = (u64)folio_pos(folio);
	ceph_calc_file_object_mapping(&ci->i_layout,
				      ceph_wbc->offset, ceph_wbc->wsize,
				      &objnum, &objoff, &xlen);

	ceph_wbc->num_ops = 1;
	ceph_wbc->strip_unit_end = folio->index + ((xlen - 1) >> PAGE_SHIFT);

	BUG_ON(ceph_wbc->pages);
	ceph_wbc->max_pages = calc_pages_for(0, (u64)xlen);
	__ceph_allocate_page_array(ceph_wbc, ceph_wbc->max_pages);

	ceph_wbc->len = 0;
}

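/*
 * e.g. (with 4 KB pages) offset == 0x100000 and len == 0x3000 make the
 * next expected folio index (0x100000 + 0x3000) >> PAGE_SHIFT == 0x103.
 */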
static inline
bool is_folio_index_contiguous(const struct ceph_writeback_ctl *ceph_wbc,
			       const struct folio *folio)
{
	return folio->index == (ceph_wbc->offset + ceph_wbc->len) >> PAGE_SHIFT;
}

static inline
bool is_num_ops_too_big(struct ceph_writeback_ctl *ceph_wbc)
{
	return ceph_wbc->num_ops >=
	       (ceph_wbc->from_pool ? CEPH_OSD_SLAB_OPS : CEPH_OSD_MAX_OPS);
}

static inline
bool is_write_congestion_happened(struct ceph_fs_client *fsc)
{
	return atomic_long_inc_return(&fsc->writeback_count) >
	       CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb);
}

static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
		struct writeback_control *wbc,
		struct ceph_writeback_ctl *ceph_wbc, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct page **pages = ceph_wbc->pages;
	unsigned int index = ceph_wbc->locked_pages;
	gfp_t gfp_flags = ceph_wbc->locked_pages ? GFP_NOWAIT : GFP_NOFS;

	if (IS_ENCRYPTED(inode)) {
		pages[index] = fscrypt_encrypt_pagecache_blocks(folio,
								PAGE_SIZE,
								0,
								gfp_flags);
		if (IS_ERR(pages[index])) {
			/* save the error before clearing the slot */
			int err = PTR_ERR(pages[index]);

			if (err == -EINVAL) {
				pr_err_client(cl, "inode->i_blkbits=%hhu\n",
					      inode->i_blkbits);
			}

			/* better not fail on first page! */
			BUG_ON(ceph_wbc->locked_pages == 0);

			pages[index] = NULL;
			return err;
		}
	} else {
		pages[index] = &folio->page;
	}

	ceph_wbc->locked_pages++;

	return 0;
}

static
int ceph_process_folio_batch(struct address_space *mapping,
			     struct writeback_control *wbc,
			     struct ceph_writeback_ctl *ceph_wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct folio *folio = NULL;
	unsigned i;
	int rc = 0;

	for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
		folio = ceph_wbc->fbatch.folios[i];

		if (!folio)
			continue;

		doutc(cl, "? %p idx %lu, folio_test_writeback %#x, "
		      "folio_test_dirty %#x, folio_test_locked %#x\n",
		      folio, folio->index, folio_test_writeback(folio),
		      folio_test_dirty(folio),
		      folio_test_locked(folio));

		if (folio_test_writeback(folio) ||
		    folio_test_private_2(folio) /* [DEPRECATED] */) {
			doutc(cl, "waiting on writeback %p\n", folio);
			folio_wait_writeback(folio);
			folio_wait_private_2(folio); /* [DEPRECATED] */
			continue;
		}

		if (ceph_wbc->locked_pages == 0)
			folio_lock(folio);
		else if (!folio_trylock(folio))
			break;

		rc = ceph_check_page_before_write(mapping, wbc,
						  ceph_wbc, folio);
		if (rc == -ENODATA) {
			rc = 0;
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			continue;
		} else if (rc == -E2BIG) {
			rc = 0;
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			break;
		}

		if (!folio_clear_dirty_for_io(folio)) {
			doutc(cl, "%p !folio_clear_dirty_for_io\n", folio);
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			continue;
		}

		/*
		 * We have something to write.  If this is
		 * the first locked page this time through,
		 * calculate max possible write size and
		 * allocate a page array
		 */
		if (ceph_wbc->locked_pages == 0) {
			ceph_allocate_page_array(mapping, ceph_wbc, folio);
		} else if (!is_folio_index_contiguous(ceph_wbc, folio)) {
			if (is_num_ops_too_big(ceph_wbc)) {
				folio_redirty_for_writepage(wbc, folio);
				folio_unlock(folio);
				break;
			}

			ceph_wbc->num_ops++;
			ceph_wbc->offset = (u64)folio_pos(folio);
			ceph_wbc->len = 0;
		}

		/* note position of first page in fbatch */
		doutc(cl, "%llx.%llx will write folio %p idx %lu\n",
		      ceph_vinop(inode), folio, folio->index);

		fsc->write_congested = is_write_congestion_happened(fsc);

		rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
						    folio);
		if (rc) {
			folio_redirty_for_writepage(wbc, folio);
			folio_unlock(folio);
			break;
		}

		ceph_wbc->fbatch.folios[i] = NULL;
		ceph_wbc->len += folio_size(folio);
	}

	ceph_wbc->processed_in_fbatch = i;

	return rc;
}

static inline
void ceph_shift_unused_folios_left(struct folio_batch *fbatch)
{
	unsigned j, n = 0;

	/* shift unused pages to the beginning of fbatch */
	for (j = 0; j < folio_batch_count(fbatch); j++) {
		if (!fbatch->folios[j])
			continue;

		if (n < j) {
			fbatch->folios[n] = fbatch->folios[j];
		}

		n++;
	}

	fbatch->nr = n;
}

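/*
 * Build and submit OSD write requests for the pages collected in
 * ceph_wbc->pages: each physically contiguous run becomes one extent op,
 * and if the locked pages need more ops than one request can carry, the
 * remainder is handed to a fresh request via the new_request loop below.
 */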
static
int ceph_submit_write(struct address_space *mapping,
		      struct writeback_control *wbc,
		      struct ceph_writeback_ctl *ceph_wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_vino vino = ceph_vino(inode);
	struct ceph_osd_request *req = NULL;
	struct page *page = NULL;
	bool caching = ceph_is_cache_enabled(inode);
	u64 offset;
	u64 len;
	unsigned i;

new_request:
	offset = ceph_fscrypt_page_offset(ceph_wbc->pages[0]);
	len = ceph_wbc->wsize;

	req = ceph_osdc_new_request(&fsc->client->osdc,
				    &ci->i_layout, vino,
				    offset, &len, 0, ceph_wbc->num_ops,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    ceph_wbc->snapc, ceph_wbc->truncate_seq,
				    ceph_wbc->truncate_size, false);
	if (IS_ERR(req)) {
		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0,
					    min(ceph_wbc->num_ops,
						CEPH_OSD_SLAB_OPS),
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE,
					    ceph_wbc->snapc,
					    ceph_wbc->truncate_seq,
					    ceph_wbc->truncate_size,
					    true);
		BUG_ON(IS_ERR(req));
	}

	page = ceph_wbc->pages[ceph_wbc->locked_pages - 1];
	BUG_ON(len < ceph_fscrypt_page_offset(page) + thp_size(page) - offset);

	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
		for (i = 0; i < folio_batch_count(&ceph_wbc->fbatch); i++) {
			struct folio *folio = ceph_wbc->fbatch.folios[i];

			if (!folio)
				continue;

			page = &folio->page;
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
		}

		for (i = 0; i < ceph_wbc->locked_pages; i++) {
			page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);

			if (!page)
				continue;

			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
		}

		ceph_osdc_put_request(req);
		return -EIO;
	}

	req->r_callback = writepages_finish;
	req->r_inode = inode;

	/* Format the osd request message and submit the write */
	len = 0;
	ceph_wbc->data_pages = ceph_wbc->pages;
	ceph_wbc->op_idx = 0;
	for (i = 0; i < ceph_wbc->locked_pages; i++) {
		u64 cur_offset;

		page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
		cur_offset = page_offset(page);

		/*
		 * Discontinuity in page range? Ceph can handle that by just passing
		 * multiple extents in the write op.
		 */
		if (offset + len != cur_offset) {
			/* If it's full, stop here */
			if (ceph_wbc->op_idx + 1 == req->r_num_ops)
				break;

			/* Kick off an fscache write with what we have so far. */
			ceph_fscache_write_to_cache(inode, offset, len, caching);

			/* Start a new extent */
			osd_req_op_extent_dup_last(req, ceph_wbc->op_idx,
						   cur_offset - offset);

			doutc(cl, "got pages at %llu~%llu\n", offset, len);

			osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
							 ceph_wbc->data_pages,
							 len, 0,
							 ceph_wbc->from_pool,
							 false);
			osd_req_op_extent_update(req, ceph_wbc->op_idx, len);

			len = 0;
			offset = cur_offset;
			ceph_wbc->data_pages = ceph_wbc->pages + i;
			ceph_wbc->op_idx++;
		}

		set_page_writeback(page);

		if (caching)
			ceph_set_page_fscache(page);

		len += thp_size(page);
	}

	ceph_fscache_write_to_cache(inode, offset, len, caching);

	if (ceph_wbc->size_stable) {
		len = min(len, ceph_wbc->i_size - offset);
	} else if (i == ceph_wbc->locked_pages) {
		/* writepages_finish() clears writeback pages
		 * according to the data length, so make sure
		 * data length covers all locked pages */
		u64 min_len = len + 1 - thp_size(page);
		len = get_writepages_data_length(inode,
						 ceph_wbc->pages[i - 1],
						 offset);
		len = max(len, min_len);
	}

	if (IS_ENCRYPTED(inode))
		len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);

	doutc(cl, "got pages at %llu~%llu\n", offset, len);

	if (IS_ENCRYPTED(inode) &&
	    ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK)) {
		pr_warn_client(cl,
			"bad encrypted write offset=%lld len=%llu\n",
			offset, len);
	}

	osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
					 ceph_wbc->data_pages, len,
					 0, ceph_wbc->from_pool, false);
	osd_req_op_extent_update(req, ceph_wbc->op_idx, len);

	BUG_ON(ceph_wbc->op_idx + 1 != req->r_num_ops);

	ceph_wbc->from_pool = false;
	if (i < ceph_wbc->locked_pages) {
		BUG_ON(ceph_wbc->num_ops <= req->r_num_ops);
		ceph_wbc->num_ops -= req->r_num_ops;
		ceph_wbc->locked_pages -= i;

		/* allocate new pages array for next request */
		ceph_wbc->data_pages = ceph_wbc->pages;
		__ceph_allocate_page_array(ceph_wbc, ceph_wbc->locked_pages);
		memcpy(ceph_wbc->pages, ceph_wbc->data_pages + i,
		       ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
		memset(ceph_wbc->data_pages + i, 0,
		       ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
	} else {
		BUG_ON(ceph_wbc->num_ops != req->r_num_ops);
		/* request message now owns the pages array */
		ceph_wbc->pages = NULL;
	}

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(&fsc->client->osdc, req);
	req = NULL;

	wbc->nr_to_write -= i;
	if (ceph_wbc->pages)
		goto new_request;

	return 0;
}

static
void ceph_wait_until_current_writes_complete(struct address_space *mapping,
					     struct writeback_control *wbc,
					     struct ceph_writeback_ctl *ceph_wbc)
{
	struct page *page;
	unsigned i, nr;

	if (wbc->sync_mode != WB_SYNC_NONE &&
	    ceph_wbc->start_index == 0 && /* all dirty pages were checked */
	    !ceph_wbc->head_snapc) {
		ceph_wbc->index = 0;

		while ((ceph_wbc->index <= ceph_wbc->end) &&
		       (nr = filemap_get_folios_tag(mapping,
						    &ceph_wbc->index,
						    (pgoff_t)-1,
						    PAGECACHE_TAG_WRITEBACK,
						    &ceph_wbc->fbatch))) {
			for (i = 0; i < nr; i++) {
				page = &ceph_wbc->fbatch.folios[i]->page;
				if (page_snap_context(page) != ceph_wbc->snapc)
					continue;
				wait_on_page_writeback(page);
			}

			folio_batch_release(&ceph_wbc->fbatch);
			cond_resched();
		}
	}
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_writeback_ctl ceph_wbc;
	int rc = 0;

	if (wbc->sync_mode == WB_SYNC_NONE && fsc->write_congested)
		return 0;

	doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
	      wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	      (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (is_forced_umount(mapping)) {
		/* we're in a forced umount, don't write! */
		return -EIO;
	}

	ceph_init_writeback_ctl(mapping, wbc, &ceph_wbc);

	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
		rc = -EIO;
		goto out;
	}

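	/*
	 * The loop below is effectively a small state machine: "retry"
	 * restarts with the (next) oldest dirty snap context,
	 * "get_more_pages" refills the folio batch, and
	 * "process_folio_batch" locks candidate folios and feeds them to
	 * ceph_submit_write() until everything tagged has been written.
	 */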
retry:
	rc = ceph_define_writeback_range(mapping, wbc, &ceph_wbc);
	if (rc == -ENODATA) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		rc = 0;
		goto dec_osd_stopping_blocker;
	}

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, ceph_wbc.index, ceph_wbc.end);

	while (!has_writeback_done(&ceph_wbc)) {
		ceph_wbc.locked_pages = 0;
		ceph_wbc.max_pages = ceph_wbc.wsize >> PAGE_SHIFT;

get_more_pages:
		ceph_folio_batch_reinit(&ceph_wbc);

		ceph_wbc.nr_folios = filemap_get_folios_tag(mapping,
							    &ceph_wbc.index,
							    ceph_wbc.end,
							    ceph_wbc.tag,
							    &ceph_wbc.fbatch);
		doutc(cl, "pagevec_lookup_range_tag for tag %#x got %d\n",
		      ceph_wbc.tag, ceph_wbc.nr_folios);

		if (!ceph_wbc.nr_folios && !ceph_wbc.locked_pages)
			break;

process_folio_batch:
		rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
		if (rc)
			goto release_folios;

		/* did we get anything? */
		if (!ceph_wbc.locked_pages)
			goto release_folios;

		if (ceph_wbc.processed_in_fbatch) {
			ceph_shift_unused_folios_left(&ceph_wbc.fbatch);

			if (folio_batch_count(&ceph_wbc.fbatch) == 0 &&
			    ceph_wbc.locked_pages < ceph_wbc.max_pages) {
				doutc(cl, "reached end fbatch, trying for more\n");
				goto get_more_pages;
			}
		}

		rc = ceph_submit_write(mapping, wbc, &ceph_wbc);
		if (rc)
			goto release_folios;

		ceph_wbc.locked_pages = 0;
		ceph_wbc.strip_unit_end = 0;

		if (folio_batch_count(&ceph_wbc.fbatch) > 0) {
			ceph_wbc.nr_folios =
				folio_batch_count(&ceph_wbc.fbatch);
			goto process_folio_batch;
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			ceph_wbc.done = true;

release_folios:
		doutc(cl, "folio_batch release on %d folios (%p)\n",
		      (int)ceph_wbc.fbatch.nr,
		      ceph_wbc.fbatch.nr ? ceph_wbc.fbatch.folios[0] : NULL);
		folio_batch_release(&ceph_wbc.fbatch);
	}

	if (ceph_wbc.should_loop && !ceph_wbc.done) {
		/* more to do; loop back to beginning of file */
		doutc(cl, "looping back to beginning of file\n");
		/* OK even when start_index == 0 */
		ceph_wbc.end = ceph_wbc.start_index - 1;

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		ceph_wait_until_current_writes_complete(mapping, wbc, &ceph_wbc);

		ceph_wbc.start_index = 0;
		ceph_wbc.index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (ceph_wbc.range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = ceph_wbc.index;

dec_osd_stopping_blocker:
	ceph_dec_osd_stopping_blocker(fsc->mdsc);

out:
	ceph_put_snap_context(ceph_wbc.last_snapc);
	doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
	      rc);

	return rc;
}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @folio: folio being dirtied
 *
 * We are only allowed to write into/dirty a folio if the folio is
 * clean, or already dirty within the same snap context. Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with folio lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (ceph_inode_is_shutdown(inode)) {
		doutc(cl, " %llx.%llx folio %p is shutdown\n",
		      ceph_vinop(inode), folio);
		return ERR_PTR(-ESTALE);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		folio_wait_writeback(folio);

		snapc = page_snap_context(&folio->page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this folio is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			doutc(cl, " %llx.%llx folio %p snapc %p not current or oldest\n",
			      ceph_vinop(inode), folio, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping folio lock) */
		doutc(cl, " %llx.%llx folio %p snapc %p not current, but oldest\n",
		      ceph_vinop(inode), folio, snapc);
		if (folio_clear_dirty_for_io(folio)) {
			int r = write_folio_nounlock(folio, NULL);
			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	snapc = ceph_find_incompatible(*foliop);
	if (snapc) {
		int r;

		folio_unlock(*foliop);
		folio_put(*foliop);
		*foliop = NULL;
		if (IS_ERR(snapc))
			return PTR_ERR(snapc);

		ceph_queue_writeback(inode);
		r = wait_event_killable(ci->i_cap_wq,
					context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
		return r == 0 ? -EAGAIN : r;
	}
	return 0;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
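 *
 * netfs_write_begin() brings the folio uptodate (reading from the OSDs
 * if necessary) and, through our ->check_write_begin hook above, makes
 * sure any conflicting snap context has been flushed first.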
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
                                        struct folio **foliop, void **_fsdata)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;

        snapc = ceph_find_incompatible(*foliop);
        if (snapc) {
                int r;

                folio_unlock(*foliop);
                folio_put(*foliop);
                *foliop = NULL;
                if (IS_ERR(snapc))
                        return PTR_ERR(snapc);

                ceph_queue_writeback(inode);
                r = wait_event_killable(ci->i_cap_wq,
                                context_is_writeable_or_written(inode, snapc));
                ceph_put_snap_context(snapc);
                return r == 0 ? -EAGAIN : r;
        }
        return 0;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len,
                            struct folio **foliop, void **fsdata)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int r;

        r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, foliop, NULL);
        if (r < 0)
                return r;

        folio_wait_private_2(*foliop); /* [DEPRECATED] */
        WARN_ON_ONCE(!folio_test_locked(*foliop));
        return 0;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct folio *folio, void *fsdata)
{
        struct inode *inode = file_inode(file);
        struct ceph_client *cl = ceph_inode_to_client(inode);
        bool check_cap = false;

        doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
              file, folio, (int)pos, (int)copied, (int)len);

        if (!folio_test_uptodate(folio)) {
                /* just return that nothing was copied on a short copy */
                if (copied < len) {
                        copied = 0;
                        goto out;
                }
                folio_mark_uptodate(folio);
        }

        /* did file size increase? */
        if (pos + copied > i_size_read(inode))
                check_cap = ceph_inode_set_size(inode, pos + copied);

        folio_mark_dirty(folio);

out:
        folio_unlock(folio);
        folio_put(folio);

        if (check_cap)
                ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);

        return copied;
}

const struct address_space_operations ceph_aops = {
        .read_folio = netfs_read_folio,
        .readahead = netfs_readahead,
        .writepages = ceph_writepages_start,
        .write_begin = ceph_write_begin,
        .write_end = ceph_write_end,
        .dirty_folio = ceph_dirty_folio,
        .invalidate_folio = ceph_invalidate_folio,
        .release_folio = netfs_release_folio,
        .direct_IO = noop_direct_IO,
        .migrate_folio = filemap_migrate_folio,
};

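/*
 * Reads (read_folio/readahead) and folio release are delegated to the
 * generic netfs library, while the write-side ops stay Ceph-specific
 * so that dirty folios can be tagged with their snap context.
 * noop_direct_IO merely marks the mapping as supporting O_DIRECT; the
 * real direct I/O paths live in the read_iter/write_iter file ops.
 */
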
static void ceph_block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = file_inode(vma->vm_file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_file_info *fi = vma->vm_file->private_data;
        loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
        int want, got, err;
        sigset_t oldset;
        vm_fault_t ret = VM_FAULT_SIGBUS;

        if (ceph_inode_is_shutdown(inode))
                return ret;

        ceph_block_sigs(&oldset);

        doutc(cl, "%llx.%llx %llu trying to get caps\n",
              ceph_vinop(inode), off);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;

        got = 0;
        err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
        if (err < 0)
                goto out_restore;

        doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
              off, ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
            !ceph_has_inline_data(ci)) {
                CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
                ceph_add_rw_context(fi, &rw_ctx);
                ret = filemap_fault(vmf);
                ceph_del_rw_context(fi, &rw_ctx);
                doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
                      ceph_vinop(inode), off, ceph_cap_string(got), ret);
        } else {
                err = -EAGAIN;
        }

        ceph_put_cap_refs(ci, got);

        if (err != -EAGAIN)
                goto out_restore;

        /* read inline data */
        if (off >= PAGE_SIZE) {
                /* does not support inline data > PAGE_SIZE */
                ret = VM_FAULT_SIGBUS;
        } else {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;

                filemap_invalidate_lock_shared(mapping);
                page = find_or_create_page(mapping, 0,
                                mapping_gfp_constraint(mapping, ~__GFP_FS));
                if (!page) {
                        ret = VM_FAULT_OOM;
                        goto out_inline;
                }
                err = __ceph_do_getattr(inode, page,
                                        CEPH_STAT_CAP_INLINE_DATA, true);
                if (err < 0 || off >= i_size_read(inode)) {
                        unlock_page(page);
                        put_page(page);
                        ret = vmf_error(err);
                        goto out_inline;
                }
                if (err < PAGE_SIZE)
                        zero_user_segment(page, err, PAGE_SIZE);
                else
                        flush_dcache_page(page);
                SetPageUptodate(page);
                vmf->page = page;
                ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
                filemap_invalidate_unlock_shared(mapping);
                doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
                      ceph_vinop(inode), off, ret);
        }
out_restore:
        ceph_restore_sigs(&oldset);
        if (err < 0)
                ret = vmf_error(err);

        return ret;
}

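/*
 * Make a read-only folio writable.  We take CEPH_CAP_FILE_BUFFER cap
 * references, then dirty the folio under the folio lock.  If the folio
 * is already dirty in an older snap context, that data must be flushed
 * before the folio can be dirtied in the head context, hence the retry
 * loop around ceph_find_incompatible() below.
 */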
static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = file_inode(vma->vm_file);
        struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi = vma->vm_file->private_data;
        struct ceph_cap_flush *prealloc_cf;
        struct folio *folio = page_folio(vmf->page);
        loff_t off = folio_pos(folio);
        loff_t size = i_size_read(inode);
        size_t len;
        int want, got, err;
        sigset_t oldset;
        vm_fault_t ret = VM_FAULT_SIGBUS;

        if (ceph_inode_is_shutdown(inode))
                return ret;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return VM_FAULT_OOM;

        sb_start_pagefault(inode->i_sb);
        ceph_block_sigs(&oldset);

        if (off + folio_size(folio) <= size)
                len = folio_size(folio);
        else
                len = offset_in_folio(folio, size);

        doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
              ceph_vinop(inode), off, len, size);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;

        got = 0;
        err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
        if (err < 0)
                goto out_free;

        doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
              off, len, ceph_cap_string(got));

        /* Update time before taking folio lock */
        file_update_time(vma->vm_file);
        inode_inc_iversion_raw(inode);

        do {
                struct ceph_snap_context *snapc;

                folio_lock(folio);

                if (folio_mkwrite_check_truncate(folio, inode) < 0) {
                        folio_unlock(folio);
                        ret = VM_FAULT_NOPAGE;
                        break;
                }

                snapc = ceph_find_incompatible(folio);
                if (!snapc) {
                        /* success.  we'll keep the folio locked. */
                        folio_mark_dirty(folio);
                        ret = VM_FAULT_LOCKED;
                        break;
                }

                folio_unlock(folio);

                if (IS_ERR(snapc)) {
                        ret = VM_FAULT_SIGBUS;
                        break;
                }

                ceph_queue_writeback(inode);
                err = wait_event_killable(ci->i_cap_wq,
                                context_is_writeable_or_written(inode, snapc));
                ceph_put_snap_context(snapc);
        } while (err == 0);

        if (ret == VM_FAULT_LOCKED) {
                int dirty;

                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
              ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
        ceph_put_cap_refs_async(ci, got);
out_free:
        ceph_restore_sigs(&oldset);
        sb_end_pagefault(inode->i_sb);
        ceph_free_cap_flush(prealloc_cf);
        if (err < 0)
                ret = vmf_error(err);
        return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                           char *data, size_t len)
{
        struct ceph_client *cl = ceph_inode_to_client(inode);
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        if (locked_page) {
                page = locked_page;
        } else {
                if (i_size_read(inode) == 0)
                        return;
                page = find_or_create_page(mapping, 0,
                                           mapping_gfp_constraint(mapping,
                                           ~__GFP_FS));
                if (!page)
                        return;
                if (PageUptodate(page)) {
                        unlock_page(page);
                        put_page(page);
                        return;
                }
        }

        doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
              ceph_vinop(inode), len, locked_page);

        if (len > 0) {
                void *kaddr = kmap_atomic(page);

                memcpy(kaddr, data, len);
                kunmap_atomic(kaddr);
        }

        if (page != locked_page) {
                if (len < PAGE_SIZE)
                        zero_user_segment(page, len, PAGE_SIZE);
                else
                        flush_dcache_page(page);

                SetPageUptodate(page);
                unlock_page(page);
                put_page(page);
        }
}

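/*
 * Convert an inlined file into a normal object-backed one.  The data
 * is written out with two OSD requests: a CREATE (so the object exists
 * even for an empty file), then a WRITE guarded by a CMPXATTR check on
 * the "inline_version" xattr, so that a racing uninline that already
 * stored a newer version fails the guard (-ECANCELED below) and is
 * treated as success.
 */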
int ceph_uninline_data(struct file *file)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        struct ceph_client *cl = fsc->client;
        struct ceph_osd_request *req = NULL;
        struct ceph_cap_flush *prealloc_cf = NULL;
        struct folio *folio = NULL;
        u64 inline_version = CEPH_INLINE_NONE;
        struct page *pages[1];
        int err = 0;
        u64 len;

        spin_lock(&ci->i_ceph_lock);
        inline_version = ci->i_inline_version;
        spin_unlock(&ci->i_ceph_lock);

        doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
              inline_version);

        if (ceph_inode_is_shutdown(inode)) {
                err = -EIO;
                goto out;
        }

        if (inline_version == CEPH_INLINE_NONE)
                return 0;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;

        if (inline_version == 1) /* initial version, no data */
                goto out_uninline;

        folio = read_mapping_folio(inode->i_mapping, 0, file);
        if (IS_ERR(folio)) {
                err = PTR_ERR(folio);
                goto out;
        }

        folio_lock(folio);

        len = i_size_read(inode);
        if (len > folio_size(folio))
                len = folio_size(folio);

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode), 0, &len, 0, 1,
                                    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
                                    NULL, 0, 0, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_unlock;
        }

        req->r_mtime = inode_get_mtime(inode);
        ceph_osdc_start_request(&fsc->client->osdc, req);
        err = ceph_osdc_wait_request(&fsc->client->osdc, req);
        ceph_osdc_put_request(req);
        if (err < 0)
                goto out_unlock;

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode), 0, &len, 1, 3,
                                    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
                                    NULL, ci->i_truncate_seq,
                                    ci->i_truncate_size, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_unlock;
        }

        pages[0] = folio_page(folio, 0);
        osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);

        {
                __le64 xattr_buf = cpu_to_le64(inline_version);

                err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
                                            "inline_version", &xattr_buf,
                                            sizeof(xattr_buf),
                                            CEPH_OSD_CMPXATTR_OP_GT,
                                            CEPH_OSD_CMPXATTR_MODE_U64);
                if (err)
                        goto out_put_req;
        }

        {
                char xattr_buf[32];
                int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
                                         "%llu", inline_version);

                err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
                                            "inline_version",
                                            xattr_buf, xattr_len, 0, 0);
                if (err)
                        goto out_put_req;
        }

        req->r_mtime = inode_get_mtime(inode);
        ceph_osdc_start_request(&fsc->client->osdc, req);
        err = ceph_osdc_wait_request(&fsc->client->osdc, req);

        ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
                                  req->r_end_latency, len, err);

out_uninline:
        if (!err) {
                int dirty;

                /* Set to CAP_INLINE_NONE and dirty the caps */
                down_read(&fsc->mdsc->snap_rwsem);
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                up_read(&fsc->mdsc->snap_rwsem);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }
out_put_req:
        ceph_osdc_put_request(req);
        if (err == -ECANCELED)
                err = 0;
out_unlock:
        if (folio) {
                folio_unlock(folio);
                folio_put(folio);
        }
out:
        ceph_free_cap_flush(prealloc_cf);
        doutc(cl, "%llx.%llx inline_version %llu = %d\n",
              ceph_vinop(inode), inline_version, err);
        return err;
}

static const struct vm_operations_struct ceph_vmops = {
        .fault = ceph_filemap_fault,
        .page_mkwrite = ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct address_space *mapping = file->f_mapping;

        if (!mapping->a_ops->read_folio)
                return -ENOEXEC;
        vma->vm_ops = &ceph_vmops;
        return 0;
}

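/*
 * Pool permission caching.  The MDS tells us which data pool a file
 * uses, but whether this client may actually read/write objects in
 * that pool is an OSD-side property.  It is probed once per
 * (pool, namespace) pair by issuing a dummy STAT read and an exclusive
 * CREATE write against the file's first object, and the result is
 * cached in an rbtree under mdsc->pool_perm_tree.
 */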
enum {
        POOL_READ  = 1,
        POOL_WRITE = 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
                                s64 pool, struct ceph_string *pool_ns)
{
        struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_client *cl = fsc->client;
        struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
        struct rb_node **p, *parent;
        struct ceph_pool_perm *perm;
        struct page **pages;
        size_t pool_ns_len;
        int err = 0, err2 = 0, have = 0;

        down_read(&mdsc->pool_perm_rwsem);
        p = &mdsc->pool_perm_tree.rb_node;
        while (*p) {
                perm = rb_entry(*p, struct ceph_pool_perm, node);
                if (pool < perm->pool)
                        p = &(*p)->rb_left;
                else if (pool > perm->pool)
                        p = &(*p)->rb_right;
                else {
                        int ret = ceph_compare_string(pool_ns,
                                                      perm->pool_ns,
                                                      perm->pool_ns_len);
                        if (ret < 0)
                                p = &(*p)->rb_left;
                        else if (ret > 0)
                                p = &(*p)->rb_right;
                        else {
                                have = perm->perm;
                                break;
                        }
                }
        }
        up_read(&mdsc->pool_perm_rwsem);
        if (*p)
                goto out;

        if (pool_ns)
                doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
                      (int)pool_ns->len, pool_ns->str);
        else
                doutc(cl, "pool %lld no perm cached\n", pool);

        down_write(&mdsc->pool_perm_rwsem);
        p = &mdsc->pool_perm_tree.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                perm = rb_entry(parent, struct ceph_pool_perm, node);
                if (pool < perm->pool)
                        p = &(*p)->rb_left;
                else if (pool > perm->pool)
                        p = &(*p)->rb_right;
                else {
                        int ret = ceph_compare_string(pool_ns,
                                                      perm->pool_ns,
                                                      perm->pool_ns_len);
                        if (ret < 0)
                                p = &(*p)->rb_left;
                        else if (ret > 0)
                                p = &(*p)->rb_right;
                        else {
                                have = perm->perm;
                                break;
                        }
                }
        }
        if (*p) {
                up_write(&mdsc->pool_perm_rwsem);
                goto out;
        }

        rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
                                         1, false, GFP_NOFS);
        if (!rd_req) {
                err = -ENOMEM;
                goto out_unlock;
        }

        rd_req->r_flags = CEPH_OSD_FLAG_READ;
        osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
        rd_req->r_base_oloc.pool = pool;
        if (pool_ns)
                rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
        ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

        err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
        if (err)
                goto out_unlock;

        wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
                                         1, false, GFP_NOFS);
        if (!wr_req) {
                err = -ENOMEM;
                goto out_unlock;
        }

        wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
        osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
        ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
        ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

        err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
        if (err)
                goto out_unlock;

        /* one page should be large enough for STAT data */
        pages = ceph_alloc_page_vector(1, GFP_KERNEL);
        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                goto out_unlock;
        }

        osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
                                     0, false, true);
        ceph_osdc_start_request(&fsc->client->osdc, rd_req);

        wr_req->r_mtime = inode_get_mtime(&ci->netfs.inode);
        ceph_osdc_start_request(&fsc->client->osdc, wr_req);

        err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
        err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

        if (err >= 0 || err == -ENOENT)
                have |= POOL_READ;
        else if (err != -EPERM) {
                if (err == -EBLOCKLISTED)
                        fsc->blocklisted = true;
                goto out_unlock;
        }

        if (err2 == 0 || err2 == -EEXIST)
                have |= POOL_WRITE;
        else if (err2 != -EPERM) {
                if (err2 == -EBLOCKLISTED)
                        fsc->blocklisted = true;
                err = err2;
                goto out_unlock;
        }

        pool_ns_len = pool_ns ? pool_ns->len : 0;
        perm = kmalloc(struct_size(perm, pool_ns, pool_ns_len + 1), GFP_NOFS);
        if (!perm) {
                err = -ENOMEM;
                goto out_unlock;
        }

        perm->pool = pool;
        perm->perm = have;
        perm->pool_ns_len = pool_ns_len;
        if (pool_ns_len > 0)
                memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
        perm->pool_ns[pool_ns_len] = 0;

        rb_link_node(&perm->node, parent, p);
        rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
        err = 0;
out_unlock:
        up_write(&mdsc->pool_perm_rwsem);

        ceph_osdc_put_request(rd_req);
        ceph_osdc_put_request(wr_req);
out:
        if (!err)
                err = have;
        if (pool_ns)
                doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
                      (int)pool_ns->len, pool_ns->str, err);
        else
                doutc(cl, "pool %lld result = %d\n", pool, err);
        return err;
}

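/*
 * Check that this client may perform @need (CEPH_CAP_FILE_RD and/or
 * CEPH_CAP_FILE_WR) against the inode's data pool, probing and caching
 * the result on first use.  Note the check/recheck dance below: after
 * a (potentially slow) __ceph_pool_perm_get() the file layout may have
 * changed, in which case the freshly loaded pool and flags are
 * re-evaluated rather than cached on the inode.
 */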
int ceph_pool_perm_check(struct inode *inode, int need)
{
        struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_string *pool_ns;
        s64 pool;
        int ret, flags;

        /* Only need to do this for regular files */
        if (!S_ISREG(inode->i_mode))
                return 0;

        if (ci->i_vino.snap != CEPH_NOSNAP) {
                /*
                 * The pool permission check needs to write to the first
                 * object.  But for snapshots, the head of the first object
                 * may have already been deleted.  Skip the check to avoid
                 * creating an orphan object.
                 */
                return 0;
        }

        if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
                                NOPOOLPERM))
                return 0;

        spin_lock(&ci->i_ceph_lock);
        flags = ci->i_ceph_flags;
        pool = ci->i_layout.pool_id;
        spin_unlock(&ci->i_ceph_lock);
check:
        if (flags & CEPH_I_POOL_PERM) {
                if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
                        doutc(cl, "pool %lld no read perm\n", pool);
                        return -EPERM;
                }
                if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
                        doutc(cl, "pool %lld no write perm\n", pool);
                        return -EPERM;
                }
                return 0;
        }

        pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
        ret = __ceph_pool_perm_get(ci, pool, pool_ns);
        ceph_put_string(pool_ns);
        if (ret < 0)
                return ret;

        flags = CEPH_I_POOL_PERM;
        if (ret & POOL_READ)
                flags |= CEPH_I_POOL_RD;
        if (ret & POOL_WRITE)
                flags |= CEPH_I_POOL_WR;

        spin_lock(&ci->i_ceph_lock);
        if (pool == ci->i_layout.pool_id &&
            pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
                ci->i_ceph_flags |= flags;
        } else {
                pool = ci->i_layout.pool_id;
                flags = ci->i_ceph_flags;
        }
        spin_unlock(&ci->i_ceph_lock);
        goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
        struct ceph_pool_perm *perm;
        struct rb_node *n;

        while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
                n = rb_first(&mdsc->pool_perm_tree);
                perm = rb_entry(n, struct ceph_pool_perm, node);
                rb_erase(n, &mdsc->pool_perm_tree);
                kfree(perm);
        }
}

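/*
 * ceph_pool_perm_destroy() above runs at mds_client teardown, by which
 * point no pool permission lookups should still be in flight, so the
 * cached entries can be freed without taking pool_perm_rwsem.
 */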