1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/ceph/ceph_debug.h> 3 #include <linux/ceph/striper.h> 4 5 #include <linux/module.h> 6 #include <linux/sched.h> 7 #include <linux/slab.h> 8 #include <linux/file.h> 9 #include <linux/mount.h> 10 #include <linux/namei.h> 11 #include <linux/writeback.h> 12 #include <linux/falloc.h> 13 #include <linux/iversion.h> 14 #include <linux/ktime.h> 15 16 #include "super.h" 17 #include "mds_client.h" 18 #include "cache.h" 19 #include "io.h" 20 #include "metric.h" 21 22 static __le32 ceph_flags_sys2wire(u32 flags) 23 { 24 u32 wire_flags = 0; 25 26 switch (flags & O_ACCMODE) { 27 case O_RDONLY: 28 wire_flags |= CEPH_O_RDONLY; 29 break; 30 case O_WRONLY: 31 wire_flags |= CEPH_O_WRONLY; 32 break; 33 case O_RDWR: 34 wire_flags |= CEPH_O_RDWR; 35 break; 36 } 37 38 flags &= ~O_ACCMODE; 39 40 #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; } 41 42 ceph_sys2wire(O_CREAT); 43 ceph_sys2wire(O_EXCL); 44 ceph_sys2wire(O_TRUNC); 45 ceph_sys2wire(O_DIRECTORY); 46 ceph_sys2wire(O_NOFOLLOW); 47 48 #undef ceph_sys2wire 49 50 if (flags) 51 dout("unused open flags: %x\n", flags); 52 53 return cpu_to_le32(wire_flags); 54 } 55 56 /* 57 * Ceph file operations 58 * 59 * Implement basic open/close functionality, and implement 60 * read/write. 61 * 62 * We implement three modes of file I/O: 63 * - buffered uses the generic_file_aio_{read,write} helpers 64 * 65 * - synchronous is used when there is multi-client read/write 66 * sharing, avoids the page cache, and synchronously waits for an 67 * ack from the OSD. 68 * 69 * - direct io takes the variant of the sync path that references 70 * user pages directly. 71 * 72 * fsync() flushes and waits on dirty pages, but just queues metadata 73 * for writeback: since the MDS can recover size and mtime there is no 74 * need to wait for MDS acknowledgement. 75 */ 76 77 /* 78 * How many pages to get in one call to iov_iter_get_pages(). This 79 * determines the size of the on-stack array used as a buffer. 80 */ 81 #define ITER_GET_BVECS_PAGES 64 82 83 static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize, 84 struct bio_vec *bvecs) 85 { 86 size_t size = 0; 87 int bvec_idx = 0; 88 89 if (maxsize > iov_iter_count(iter)) 90 maxsize = iov_iter_count(iter); 91 92 while (size < maxsize) { 93 struct page *pages[ITER_GET_BVECS_PAGES]; 94 ssize_t bytes; 95 size_t start; 96 int idx = 0; 97 98 bytes = iov_iter_get_pages(iter, pages, maxsize - size, 99 ITER_GET_BVECS_PAGES, &start); 100 if (bytes < 0) 101 return size ?: bytes; 102 103 iov_iter_advance(iter, bytes); 104 size += bytes; 105 106 for ( ; bytes; idx++, bvec_idx++) { 107 struct bio_vec bv = { 108 .bv_page = pages[idx], 109 .bv_len = min_t(int, bytes, PAGE_SIZE - start), 110 .bv_offset = start, 111 }; 112 113 bvecs[bvec_idx] = bv; 114 bytes -= bv.bv_len; 115 start = 0; 116 } 117 } 118 119 return size; 120 } 121 122 /* 123 * iov_iter_get_pages() only considers one iov_iter segment, no matter 124 * what maxsize or maxpages are given. For ITER_BVEC that is a single 125 * page. 126 * 127 * Attempt to get up to @maxsize bytes worth of pages from @iter. 128 * Return the number of bytes in the created bio_vec array, or an error. 129 */ 130 static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize, 131 struct bio_vec **bvecs, int *num_bvecs) 132 { 133 struct bio_vec *bv; 134 size_t orig_count = iov_iter_count(iter); 135 ssize_t bytes; 136 int npages; 137 138 iov_iter_truncate(iter, maxsize); 139 npages = iov_iter_npages(iter, INT_MAX); 140 iov_iter_reexpand(iter, orig_count); 141 142 /* 143 * __iter_get_bvecs() may populate only part of the array -- zero it 144 * out. 145 */ 146 bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO); 147 if (!bv) 148 return -ENOMEM; 149 150 bytes = __iter_get_bvecs(iter, maxsize, bv); 151 if (bytes < 0) { 152 /* 153 * No pages were pinned -- just free the array. 154 */ 155 kvfree(bv); 156 return bytes; 157 } 158 159 *bvecs = bv; 160 *num_bvecs = npages; 161 return bytes; 162 } 163 164 static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty) 165 { 166 int i; 167 168 for (i = 0; i < num_bvecs; i++) { 169 if (bvecs[i].bv_page) { 170 if (should_dirty) 171 set_page_dirty_lock(bvecs[i].bv_page); 172 put_page(bvecs[i].bv_page); 173 } 174 } 175 kvfree(bvecs); 176 } 177 178 /* 179 * Prepare an open request. Preallocate ceph_cap to avoid an 180 * inopportune ENOMEM later. 181 */ 182 static struct ceph_mds_request * 183 prepare_open_request(struct super_block *sb, int flags, int create_mode) 184 { 185 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb); 186 struct ceph_mds_request *req; 187 int want_auth = USE_ANY_MDS; 188 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 189 190 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) 191 want_auth = USE_AUTH_MDS; 192 193 req = ceph_mdsc_create_request(mdsc, op, want_auth); 194 if (IS_ERR(req)) 195 goto out; 196 req->r_fmode = ceph_flags_to_mode(flags); 197 req->r_args.open.flags = ceph_flags_sys2wire(flags); 198 req->r_args.open.mode = cpu_to_le32(create_mode); 199 out: 200 return req; 201 } 202 203 static int ceph_init_file_info(struct inode *inode, struct file *file, 204 int fmode, bool isdir) 205 { 206 struct ceph_inode_info *ci = ceph_inode(inode); 207 struct ceph_mount_options *opt = 208 ceph_inode_to_client(&ci->vfs_inode)->mount_options; 209 struct ceph_file_info *fi; 210 int ret; 211 212 dout("%s %p %p 0%o (%s)\n", __func__, inode, file, 213 inode->i_mode, isdir ? "dir" : "regular"); 214 BUG_ON(inode->i_fop->release != ceph_release); 215 216 if (isdir) { 217 struct ceph_dir_file_info *dfi = 218 kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL); 219 if (!dfi) 220 return -ENOMEM; 221 222 file->private_data = dfi; 223 fi = &dfi->file_info; 224 dfi->next_offset = 2; 225 dfi->readdir_cache_idx = -1; 226 } else { 227 fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); 228 if (!fi) 229 return -ENOMEM; 230 231 if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE) 232 fi->flags |= CEPH_F_SYNC; 233 234 file->private_data = fi; 235 } 236 237 ceph_get_fmode(ci, fmode, 1); 238 fi->fmode = fmode; 239 240 spin_lock_init(&fi->rw_contexts_lock); 241 INIT_LIST_HEAD(&fi->rw_contexts); 242 fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen); 243 244 if ((file->f_mode & FMODE_WRITE) && 245 ci->i_inline_version != CEPH_INLINE_NONE) { 246 ret = ceph_uninline_data(file); 247 if (ret < 0) 248 goto error; 249 } 250 251 return 0; 252 253 error: 254 ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE); 255 ceph_put_fmode(ci, fi->fmode, 1); 256 kmem_cache_free(ceph_file_cachep, fi); 257 /* wake up anyone waiting for caps on this inode */ 258 wake_up_all(&ci->i_cap_wq); 259 return ret; 260 } 261 262 /* 263 * initialize private struct file data. 264 * if we fail, clean up by dropping fmode reference on the ceph_inode 265 */ 266 static int ceph_init_file(struct inode *inode, struct file *file, int fmode) 267 { 268 int ret = 0; 269 270 switch (inode->i_mode & S_IFMT) { 271 case S_IFREG: 272 ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE); 273 fallthrough; 274 case S_IFDIR: 275 ret = ceph_init_file_info(inode, file, fmode, 276 S_ISDIR(inode->i_mode)); 277 break; 278 279 case S_IFLNK: 280 dout("init_file %p %p 0%o (symlink)\n", inode, file, 281 inode->i_mode); 282 break; 283 284 default: 285 dout("init_file %p %p 0%o (special)\n", inode, file, 286 inode->i_mode); 287 /* 288 * we need to drop the open ref now, since we don't 289 * have .release set to ceph_release. 290 */ 291 BUG_ON(inode->i_fop->release == ceph_release); 292 293 /* call the proper open fop */ 294 ret = inode->i_fop->open(inode, file); 295 } 296 return ret; 297 } 298 299 /* 300 * try renew caps after session gets killed. 301 */ 302 int ceph_renew_caps(struct inode *inode, int fmode) 303 { 304 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 305 struct ceph_inode_info *ci = ceph_inode(inode); 306 struct ceph_mds_request *req; 307 int err, flags, wanted; 308 309 spin_lock(&ci->i_ceph_lock); 310 __ceph_touch_fmode(ci, mdsc, fmode); 311 wanted = __ceph_caps_file_wanted(ci); 312 if (__ceph_is_any_real_caps(ci) && 313 (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) { 314 int issued = __ceph_caps_issued(ci, NULL); 315 spin_unlock(&ci->i_ceph_lock); 316 dout("renew caps %p want %s issued %s updating mds_wanted\n", 317 inode, ceph_cap_string(wanted), ceph_cap_string(issued)); 318 ceph_check_caps(ci, 0, NULL); 319 return 0; 320 } 321 spin_unlock(&ci->i_ceph_lock); 322 323 flags = 0; 324 if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR)) 325 flags = O_RDWR; 326 else if (wanted & CEPH_CAP_FILE_RD) 327 flags = O_RDONLY; 328 else if (wanted & CEPH_CAP_FILE_WR) 329 flags = O_WRONLY; 330 #ifdef O_LAZY 331 if (wanted & CEPH_CAP_FILE_LAZYIO) 332 flags |= O_LAZY; 333 #endif 334 335 req = prepare_open_request(inode->i_sb, flags, 0); 336 if (IS_ERR(req)) { 337 err = PTR_ERR(req); 338 goto out; 339 } 340 341 req->r_inode = inode; 342 ihold(inode); 343 req->r_num_caps = 1; 344 345 err = ceph_mdsc_do_request(mdsc, NULL, req); 346 ceph_mdsc_put_request(req); 347 out: 348 dout("renew caps %p open result=%d\n", inode, err); 349 return err < 0 ? err : 0; 350 } 351 352 /* 353 * If we already have the requisite capabilities, we can satisfy 354 * the open request locally (no need to request new caps from the 355 * MDS). We do, however, need to inform the MDS (asynchronously) 356 * if our wanted caps set expands. 357 */ 358 int ceph_open(struct inode *inode, struct file *file) 359 { 360 struct ceph_inode_info *ci = ceph_inode(inode); 361 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 362 struct ceph_mds_client *mdsc = fsc->mdsc; 363 struct ceph_mds_request *req; 364 struct ceph_file_info *fi = file->private_data; 365 int err; 366 int flags, fmode, wanted; 367 368 if (fi) { 369 dout("open file %p is already opened\n", file); 370 return 0; 371 } 372 373 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ 374 flags = file->f_flags & ~(O_CREAT|O_EXCL); 375 if (S_ISDIR(inode->i_mode)) 376 flags = O_DIRECTORY; /* mds likes to know */ 377 378 dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, 379 ceph_vinop(inode), file, flags, file->f_flags); 380 fmode = ceph_flags_to_mode(flags); 381 wanted = ceph_caps_for_mode(fmode); 382 383 /* snapped files are read-only */ 384 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) 385 return -EROFS; 386 387 /* trivially open snapdir */ 388 if (ceph_snap(inode) == CEPH_SNAPDIR) { 389 return ceph_init_file(inode, file, fmode); 390 } 391 392 /* 393 * No need to block if we have caps on the auth MDS (for 394 * write) or any MDS (for read). Update wanted set 395 * asynchronously. 396 */ 397 spin_lock(&ci->i_ceph_lock); 398 if (__ceph_is_any_real_caps(ci) && 399 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { 400 int mds_wanted = __ceph_caps_mds_wanted(ci, true); 401 int issued = __ceph_caps_issued(ci, NULL); 402 403 dout("open %p fmode %d want %s issued %s using existing\n", 404 inode, fmode, ceph_cap_string(wanted), 405 ceph_cap_string(issued)); 406 __ceph_touch_fmode(ci, mdsc, fmode); 407 spin_unlock(&ci->i_ceph_lock); 408 409 /* adjust wanted? */ 410 if ((issued & wanted) != wanted && 411 (mds_wanted & wanted) != wanted && 412 ceph_snap(inode) != CEPH_SNAPDIR) 413 ceph_check_caps(ci, 0, NULL); 414 415 return ceph_init_file(inode, file, fmode); 416 } else if (ceph_snap(inode) != CEPH_NOSNAP && 417 (ci->i_snap_caps & wanted) == wanted) { 418 __ceph_touch_fmode(ci, mdsc, fmode); 419 spin_unlock(&ci->i_ceph_lock); 420 return ceph_init_file(inode, file, fmode); 421 } 422 423 spin_unlock(&ci->i_ceph_lock); 424 425 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 426 req = prepare_open_request(inode->i_sb, flags, 0); 427 if (IS_ERR(req)) { 428 err = PTR_ERR(req); 429 goto out; 430 } 431 req->r_inode = inode; 432 ihold(inode); 433 434 req->r_num_caps = 1; 435 err = ceph_mdsc_do_request(mdsc, NULL, req); 436 if (!err) 437 err = ceph_init_file(inode, file, req->r_fmode); 438 ceph_mdsc_put_request(req); 439 dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 440 out: 441 return err; 442 } 443 444 /* Clone the layout from a synchronous create, if the dir now has Dc caps */ 445 static void 446 cache_file_layout(struct inode *dst, struct inode *src) 447 { 448 struct ceph_inode_info *cdst = ceph_inode(dst); 449 struct ceph_inode_info *csrc = ceph_inode(src); 450 451 spin_lock(&cdst->i_ceph_lock); 452 if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) && 453 !ceph_file_layout_is_valid(&cdst->i_cached_layout)) { 454 memcpy(&cdst->i_cached_layout, &csrc->i_layout, 455 sizeof(cdst->i_cached_layout)); 456 rcu_assign_pointer(cdst->i_cached_layout.pool_ns, 457 ceph_try_get_string(csrc->i_layout.pool_ns)); 458 } 459 spin_unlock(&cdst->i_ceph_lock); 460 } 461 462 /* 463 * Try to set up an async create. We need caps, a file layout, and inode number, 464 * and either a lease on the dentry or complete dir info. If any of those 465 * criteria are not satisfied, then return false and the caller can go 466 * synchronous. 467 */ 468 static int try_prep_async_create(struct inode *dir, struct dentry *dentry, 469 struct ceph_file_layout *lo, u64 *pino) 470 { 471 struct ceph_inode_info *ci = ceph_inode(dir); 472 struct ceph_dentry_info *di = ceph_dentry(dentry); 473 int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE; 474 u64 ino; 475 476 spin_lock(&ci->i_ceph_lock); 477 /* No auth cap means no chance for Dc caps */ 478 if (!ci->i_auth_cap) 479 goto no_async; 480 481 /* Any delegated inos? */ 482 if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos)) 483 goto no_async; 484 485 if (!ceph_file_layout_is_valid(&ci->i_cached_layout)) 486 goto no_async; 487 488 if ((__ceph_caps_issued(ci, NULL) & want) != want) 489 goto no_async; 490 491 if (d_in_lookup(dentry)) { 492 if (!__ceph_dir_is_complete(ci)) 493 goto no_async; 494 spin_lock(&dentry->d_lock); 495 di->lease_shared_gen = atomic_read(&ci->i_shared_gen); 496 spin_unlock(&dentry->d_lock); 497 } else if (atomic_read(&ci->i_shared_gen) != 498 READ_ONCE(di->lease_shared_gen)) { 499 goto no_async; 500 } 501 502 ino = ceph_get_deleg_ino(ci->i_auth_cap->session); 503 if (!ino) 504 goto no_async; 505 506 *pino = ino; 507 ceph_take_cap_refs(ci, want, false); 508 memcpy(lo, &ci->i_cached_layout, sizeof(*lo)); 509 rcu_assign_pointer(lo->pool_ns, 510 ceph_try_get_string(ci->i_cached_layout.pool_ns)); 511 got = want; 512 no_async: 513 spin_unlock(&ci->i_ceph_lock); 514 return got; 515 } 516 517 static void restore_deleg_ino(struct inode *dir, u64 ino) 518 { 519 struct ceph_inode_info *ci = ceph_inode(dir); 520 struct ceph_mds_session *s = NULL; 521 522 spin_lock(&ci->i_ceph_lock); 523 if (ci->i_auth_cap) 524 s = ceph_get_mds_session(ci->i_auth_cap->session); 525 spin_unlock(&ci->i_ceph_lock); 526 if (s) { 527 int err = ceph_restore_deleg_ino(s, ino); 528 if (err) 529 pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n", 530 ino, err); 531 ceph_put_mds_session(s); 532 } 533 } 534 535 static void wake_async_create_waiters(struct inode *inode, 536 struct ceph_mds_session *session) 537 { 538 struct ceph_inode_info *ci = ceph_inode(inode); 539 540 spin_lock(&ci->i_ceph_lock); 541 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) { 542 ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE; 543 wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT); 544 } 545 ceph_kick_flushing_inode_caps(session, ci); 546 spin_unlock(&ci->i_ceph_lock); 547 } 548 549 static void ceph_async_create_cb(struct ceph_mds_client *mdsc, 550 struct ceph_mds_request *req) 551 { 552 struct dentry *dentry = req->r_dentry; 553 struct inode *dinode = d_inode(dentry); 554 struct inode *tinode = req->r_target_inode; 555 int result = req->r_err ? req->r_err : 556 le32_to_cpu(req->r_reply_info.head->result); 557 558 WARN_ON_ONCE(dinode && tinode && dinode != tinode); 559 560 /* MDS changed -- caller must resubmit */ 561 if (result == -EJUKEBOX) 562 goto out; 563 564 mapping_set_error(req->r_parent->i_mapping, result); 565 566 if (result) { 567 int pathlen = 0; 568 u64 base = 0; 569 char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen, 570 &base, 0); 571 572 pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n", 573 base, IS_ERR(path) ? "<<bad>>" : path, result); 574 ceph_mdsc_free_path(path, pathlen); 575 576 ceph_dir_clear_complete(req->r_parent); 577 if (!d_unhashed(dentry)) 578 d_drop(dentry); 579 580 if (dinode) { 581 mapping_set_error(dinode->i_mapping, result); 582 ceph_inode_shutdown(dinode); 583 wake_async_create_waiters(dinode, req->r_session); 584 } 585 } 586 587 if (tinode) { 588 u64 ino = ceph_vino(tinode).ino; 589 590 if (req->r_deleg_ino != ino) 591 pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n", 592 __func__, req->r_err, req->r_deleg_ino, ino); 593 594 mapping_set_error(tinode->i_mapping, result); 595 wake_async_create_waiters(tinode, req->r_session); 596 } else if (!result) { 597 pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__, 598 req->r_deleg_ino); 599 } 600 out: 601 ceph_mdsc_release_dir_caps(req); 602 } 603 604 static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, 605 struct file *file, umode_t mode, 606 struct ceph_mds_request *req, 607 struct ceph_acl_sec_ctx *as_ctx, 608 struct ceph_file_layout *lo) 609 { 610 int ret; 611 char xattr_buf[4]; 612 struct ceph_mds_reply_inode in = { }; 613 struct ceph_mds_reply_info_in iinfo = { .in = &in }; 614 struct ceph_inode_info *ci = ceph_inode(dir); 615 struct inode *inode; 616 struct timespec64 now; 617 struct ceph_string *pool_ns; 618 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); 619 struct ceph_vino vino = { .ino = req->r_deleg_ino, 620 .snap = CEPH_NOSNAP }; 621 622 ktime_get_real_ts64(&now); 623 624 inode = ceph_get_inode(dentry->d_sb, vino); 625 if (IS_ERR(inode)) 626 return PTR_ERR(inode); 627 628 iinfo.inline_version = CEPH_INLINE_NONE; 629 iinfo.change_attr = 1; 630 ceph_encode_timespec64(&iinfo.btime, &now); 631 632 if (req->r_pagelist) { 633 iinfo.xattr_len = req->r_pagelist->length; 634 iinfo.xattr_data = req->r_pagelist->mapped_tail; 635 } else { 636 /* fake it */ 637 iinfo.xattr_len = ARRAY_SIZE(xattr_buf); 638 iinfo.xattr_data = xattr_buf; 639 memset(iinfo.xattr_data, 0, iinfo.xattr_len); 640 } 641 642 in.ino = cpu_to_le64(vino.ino); 643 in.snapid = cpu_to_le64(CEPH_NOSNAP); 644 in.version = cpu_to_le64(1); // ??? 645 in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE); 646 in.cap.cap_id = cpu_to_le64(1); 647 in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino); 648 in.cap.flags = CEPH_CAP_FLAG_AUTH; 649 in.ctime = in.mtime = in.atime = iinfo.btime; 650 in.truncate_seq = cpu_to_le32(1); 651 in.truncate_size = cpu_to_le64(-1ULL); 652 in.xattr_version = cpu_to_le64(1); 653 in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid())); 654 if (dir->i_mode & S_ISGID) { 655 in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid)); 656 657 /* Directories always inherit the setgid bit. */ 658 if (S_ISDIR(mode)) 659 mode |= S_ISGID; 660 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) && 661 !in_group_p(dir->i_gid) && 662 !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID)) 663 mode &= ~S_ISGID; 664 } else { 665 in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid())); 666 } 667 in.mode = cpu_to_le32((u32)mode); 668 669 in.nlink = cpu_to_le32(1); 670 in.max_size = cpu_to_le64(lo->stripe_unit); 671 672 ceph_file_layout_to_legacy(lo, &in.layout); 673 /* lo is private, so pool_ns can't change */ 674 pool_ns = rcu_dereference_raw(lo->pool_ns); 675 if (pool_ns) { 676 iinfo.pool_ns_len = pool_ns->len; 677 iinfo.pool_ns_data = pool_ns->str; 678 } 679 680 down_read(&mdsc->snap_rwsem); 681 ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session, 682 req->r_fmode, NULL); 683 up_read(&mdsc->snap_rwsem); 684 if (ret) { 685 dout("%s failed to fill inode: %d\n", __func__, ret); 686 ceph_dir_clear_complete(dir); 687 if (!d_unhashed(dentry)) 688 d_drop(dentry); 689 if (inode->i_state & I_NEW) 690 discard_new_inode(inode); 691 } else { 692 struct dentry *dn; 693 694 dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__, 695 vino.ino, ceph_ino(dir), dentry->d_name.name); 696 ceph_dir_clear_ordered(dir); 697 ceph_init_inode_acls(inode, as_ctx); 698 if (inode->i_state & I_NEW) { 699 /* 700 * If it's not I_NEW, then someone created this before 701 * we got here. Assume the server is aware of it at 702 * that point and don't worry about setting 703 * CEPH_I_ASYNC_CREATE. 704 */ 705 ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE; 706 unlock_new_inode(inode); 707 } 708 if (d_in_lookup(dentry) || d_really_is_negative(dentry)) { 709 if (!d_unhashed(dentry)) 710 d_drop(dentry); 711 dn = d_splice_alias(inode, dentry); 712 WARN_ON_ONCE(dn && dn != dentry); 713 } 714 file->f_mode |= FMODE_CREATED; 715 ret = finish_open(file, dentry, ceph_open); 716 } 717 return ret; 718 } 719 720 /* 721 * Do a lookup + open with a single request. If we get a non-existent 722 * file or symlink, return 1 so the VFS can retry. 723 */ 724 int ceph_atomic_open(struct inode *dir, struct dentry *dentry, 725 struct file *file, unsigned flags, umode_t mode) 726 { 727 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 728 struct ceph_mds_client *mdsc = fsc->mdsc; 729 struct ceph_mds_request *req; 730 struct dentry *dn; 731 struct ceph_acl_sec_ctx as_ctx = {}; 732 bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS); 733 int mask; 734 int err; 735 736 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", 737 dir, dentry, dentry, 738 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); 739 740 if (dentry->d_name.len > NAME_MAX) 741 return -ENAMETOOLONG; 742 743 if (flags & O_CREAT) { 744 if (ceph_quota_is_max_files_exceeded(dir)) 745 return -EDQUOT; 746 err = ceph_pre_init_acls(dir, &mode, &as_ctx); 747 if (err < 0) 748 return err; 749 err = ceph_security_init_secctx(dentry, mode, &as_ctx); 750 if (err < 0) 751 goto out_ctx; 752 /* Async create can't handle more than a page of xattrs */ 753 if (as_ctx.pagelist && 754 !list_is_singular(&as_ctx.pagelist->head)) 755 try_async = false; 756 } else if (!d_in_lookup(dentry)) { 757 /* If it's not being looked up, it's negative */ 758 return -ENOENT; 759 } 760 retry: 761 /* do the open */ 762 req = prepare_open_request(dir->i_sb, flags, mode); 763 if (IS_ERR(req)) { 764 err = PTR_ERR(req); 765 goto out_ctx; 766 } 767 req->r_dentry = dget(dentry); 768 req->r_num_caps = 2; 769 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 770 if (ceph_security_xattr_wanted(dir)) 771 mask |= CEPH_CAP_XATTR_SHARED; 772 req->r_args.open.mask = cpu_to_le32(mask); 773 req->r_parent = dir; 774 ihold(dir); 775 776 if (flags & O_CREAT) { 777 struct ceph_file_layout lo; 778 779 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL; 780 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 781 if (as_ctx.pagelist) { 782 req->r_pagelist = as_ctx.pagelist; 783 as_ctx.pagelist = NULL; 784 } 785 if (try_async && 786 (req->r_dir_caps = 787 try_prep_async_create(dir, dentry, &lo, 788 &req->r_deleg_ino))) { 789 set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags); 790 req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL); 791 req->r_callback = ceph_async_create_cb; 792 err = ceph_mdsc_submit_request(mdsc, dir, req); 793 if (!err) { 794 err = ceph_finish_async_create(dir, dentry, 795 file, mode, req, 796 &as_ctx, &lo); 797 } else if (err == -EJUKEBOX) { 798 restore_deleg_ino(dir, req->r_deleg_ino); 799 ceph_mdsc_put_request(req); 800 try_async = false; 801 ceph_put_string(rcu_dereference_raw(lo.pool_ns)); 802 goto retry; 803 } 804 ceph_put_string(rcu_dereference_raw(lo.pool_ns)); 805 goto out_req; 806 } 807 } 808 809 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); 810 err = ceph_mdsc_do_request(mdsc, 811 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, 812 req); 813 if (err == -ENOENT) { 814 dentry = ceph_handle_snapdir(req, dentry); 815 if (IS_ERR(dentry)) { 816 err = PTR_ERR(dentry); 817 goto out_req; 818 } 819 err = 0; 820 } 821 822 if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry) 823 err = ceph_handle_notrace_create(dir, dentry); 824 825 if (d_in_lookup(dentry)) { 826 dn = ceph_finish_lookup(req, dentry, err); 827 if (IS_ERR(dn)) 828 err = PTR_ERR(dn); 829 } else { 830 /* we were given a hashed negative dentry */ 831 dn = NULL; 832 } 833 if (err) 834 goto out_req; 835 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { 836 /* make vfs retry on splice, ENOENT, or symlink */ 837 dout("atomic_open finish_no_open on dn %p\n", dn); 838 err = finish_no_open(file, dn); 839 } else { 840 dout("atomic_open finish_open on dn %p\n", dn); 841 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 842 struct inode *newino = d_inode(dentry); 843 844 cache_file_layout(dir, newino); 845 ceph_init_inode_acls(newino, &as_ctx); 846 file->f_mode |= FMODE_CREATED; 847 } 848 err = finish_open(file, dentry, ceph_open); 849 } 850 out_req: 851 ceph_mdsc_put_request(req); 852 out_ctx: 853 ceph_release_acl_sec_ctx(&as_ctx); 854 dout("atomic_open result=%d\n", err); 855 return err; 856 } 857 858 int ceph_release(struct inode *inode, struct file *file) 859 { 860 struct ceph_inode_info *ci = ceph_inode(inode); 861 862 if (S_ISDIR(inode->i_mode)) { 863 struct ceph_dir_file_info *dfi = file->private_data; 864 dout("release inode %p dir file %p\n", inode, file); 865 WARN_ON(!list_empty(&dfi->file_info.rw_contexts)); 866 867 ceph_put_fmode(ci, dfi->file_info.fmode, 1); 868 869 if (dfi->last_readdir) 870 ceph_mdsc_put_request(dfi->last_readdir); 871 kfree(dfi->last_name); 872 kfree(dfi->dir_info); 873 kmem_cache_free(ceph_dir_file_cachep, dfi); 874 } else { 875 struct ceph_file_info *fi = file->private_data; 876 dout("release inode %p regular file %p\n", inode, file); 877 WARN_ON(!list_empty(&fi->rw_contexts)); 878 879 ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE); 880 ceph_put_fmode(ci, fi->fmode, 1); 881 882 kmem_cache_free(ceph_file_cachep, fi); 883 } 884 885 /* wake up anyone waiting for caps on this inode */ 886 wake_up_all(&ci->i_cap_wq); 887 return 0; 888 } 889 890 enum { 891 HAVE_RETRIED = 1, 892 CHECK_EOF = 2, 893 READ_INLINE = 3, 894 }; 895 896 /* 897 * Completely synchronous read and write methods. Direct from __user 898 * buffer to osd, or directly to user pages (if O_DIRECT). 899 * 900 * If the read spans object boundary, just do multiple reads. (That's not 901 * atomic, but good enough for now.) 902 * 903 * If we get a short result from the OSD, check against i_size; we need to 904 * only return a short read to the caller if we hit EOF. 905 */ 906 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to, 907 int *retry_op) 908 { 909 struct file *file = iocb->ki_filp; 910 struct inode *inode = file_inode(file); 911 struct ceph_inode_info *ci = ceph_inode(inode); 912 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 913 struct ceph_osd_client *osdc = &fsc->client->osdc; 914 ssize_t ret; 915 u64 off = iocb->ki_pos; 916 u64 len = iov_iter_count(to); 917 u64 i_size = i_size_read(inode); 918 919 dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len, 920 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 921 922 if (!len) 923 return 0; 924 /* 925 * flush any page cache pages in this range. this 926 * will make concurrent normal and sync io slow, 927 * but it will at least behave sensibly when they are 928 * in sequence. 929 */ 930 ret = filemap_write_and_wait_range(inode->i_mapping, 931 off, off + len - 1); 932 if (ret < 0) 933 return ret; 934 935 ret = 0; 936 while ((len = iov_iter_count(to)) > 0) { 937 struct ceph_osd_request *req; 938 struct page **pages; 939 int num_pages; 940 size_t page_off; 941 bool more; 942 int idx; 943 size_t left; 944 945 req = ceph_osdc_new_request(osdc, &ci->i_layout, 946 ci->i_vino, off, &len, 0, 1, 947 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 948 NULL, ci->i_truncate_seq, 949 ci->i_truncate_size, false); 950 if (IS_ERR(req)) { 951 ret = PTR_ERR(req); 952 break; 953 } 954 955 more = len < iov_iter_count(to); 956 957 num_pages = calc_pages_for(off, len); 958 page_off = off & ~PAGE_MASK; 959 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 960 if (IS_ERR(pages)) { 961 ceph_osdc_put_request(req); 962 ret = PTR_ERR(pages); 963 break; 964 } 965 966 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off, 967 false, false); 968 ret = ceph_osdc_start_request(osdc, req, false); 969 if (!ret) 970 ret = ceph_osdc_wait_request(osdc, req); 971 972 ceph_update_read_metrics(&fsc->mdsc->metric, 973 req->r_start_latency, 974 req->r_end_latency, 975 len, ret); 976 977 ceph_osdc_put_request(req); 978 979 i_size = i_size_read(inode); 980 dout("sync_read %llu~%llu got %zd i_size %llu%s\n", 981 off, len, ret, i_size, (more ? " MORE" : "")); 982 983 if (ret == -ENOENT) 984 ret = 0; 985 if (ret >= 0 && ret < len && (off + ret < i_size)) { 986 int zlen = min(len - ret, i_size - off - ret); 987 int zoff = page_off + ret; 988 dout("sync_read zero gap %llu~%llu\n", 989 off + ret, off + ret + zlen); 990 ceph_zero_page_vector_range(zoff, zlen, pages); 991 ret += zlen; 992 } 993 994 idx = 0; 995 left = ret > 0 ? ret : 0; 996 while (left > 0) { 997 size_t len, copied; 998 page_off = off & ~PAGE_MASK; 999 len = min_t(size_t, left, PAGE_SIZE - page_off); 1000 SetPageUptodate(pages[idx]); 1001 copied = copy_page_to_iter(pages[idx++], 1002 page_off, len, to); 1003 off += copied; 1004 left -= copied; 1005 if (copied < len) { 1006 ret = -EFAULT; 1007 break; 1008 } 1009 } 1010 ceph_release_page_vector(pages, num_pages); 1011 1012 if (ret < 0) { 1013 if (ret == -EBLOCKLISTED) 1014 fsc->blocklisted = true; 1015 break; 1016 } 1017 1018 if (off >= i_size || !more) 1019 break; 1020 } 1021 1022 if (off > iocb->ki_pos) { 1023 if (off >= i_size) { 1024 *retry_op = CHECK_EOF; 1025 ret = i_size - iocb->ki_pos; 1026 iocb->ki_pos = i_size; 1027 } else { 1028 ret = off - iocb->ki_pos; 1029 iocb->ki_pos = off; 1030 } 1031 } 1032 1033 dout("sync_read result %zd retry_op %d\n", ret, *retry_op); 1034 return ret; 1035 } 1036 1037 struct ceph_aio_request { 1038 struct kiocb *iocb; 1039 size_t total_len; 1040 bool write; 1041 bool should_dirty; 1042 int error; 1043 struct list_head osd_reqs; 1044 unsigned num_reqs; 1045 atomic_t pending_reqs; 1046 struct timespec64 mtime; 1047 struct ceph_cap_flush *prealloc_cf; 1048 }; 1049 1050 struct ceph_aio_work { 1051 struct work_struct work; 1052 struct ceph_osd_request *req; 1053 }; 1054 1055 static void ceph_aio_retry_work(struct work_struct *work); 1056 1057 static void ceph_aio_complete(struct inode *inode, 1058 struct ceph_aio_request *aio_req) 1059 { 1060 struct ceph_inode_info *ci = ceph_inode(inode); 1061 int ret; 1062 1063 if (!atomic_dec_and_test(&aio_req->pending_reqs)) 1064 return; 1065 1066 if (aio_req->iocb->ki_flags & IOCB_DIRECT) 1067 inode_dio_end(inode); 1068 1069 ret = aio_req->error; 1070 if (!ret) 1071 ret = aio_req->total_len; 1072 1073 dout("ceph_aio_complete %p rc %d\n", inode, ret); 1074 1075 if (ret >= 0 && aio_req->write) { 1076 int dirty; 1077 1078 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len; 1079 if (endoff > i_size_read(inode)) { 1080 if (ceph_inode_set_size(inode, endoff)) 1081 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1082 } 1083 1084 spin_lock(&ci->i_ceph_lock); 1085 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1086 &aio_req->prealloc_cf); 1087 spin_unlock(&ci->i_ceph_lock); 1088 if (dirty) 1089 __mark_inode_dirty(inode, dirty); 1090 1091 } 1092 1093 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : 1094 CEPH_CAP_FILE_RD)); 1095 1096 aio_req->iocb->ki_complete(aio_req->iocb, ret); 1097 1098 ceph_free_cap_flush(aio_req->prealloc_cf); 1099 kfree(aio_req); 1100 } 1101 1102 static void ceph_aio_complete_req(struct ceph_osd_request *req) 1103 { 1104 int rc = req->r_result; 1105 struct inode *inode = req->r_inode; 1106 struct ceph_aio_request *aio_req = req->r_priv; 1107 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 1108 struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric; 1109 unsigned int len = osd_data->bvec_pos.iter.bi_size; 1110 1111 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS); 1112 BUG_ON(!osd_data->num_bvecs); 1113 1114 dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len); 1115 1116 if (rc == -EOLDSNAPC) { 1117 struct ceph_aio_work *aio_work; 1118 BUG_ON(!aio_req->write); 1119 1120 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); 1121 if (aio_work) { 1122 INIT_WORK(&aio_work->work, ceph_aio_retry_work); 1123 aio_work->req = req; 1124 queue_work(ceph_inode_to_client(inode)->inode_wq, 1125 &aio_work->work); 1126 return; 1127 } 1128 rc = -ENOMEM; 1129 } else if (!aio_req->write) { 1130 if (rc == -ENOENT) 1131 rc = 0; 1132 if (rc >= 0 && len > rc) { 1133 struct iov_iter i; 1134 int zlen = len - rc; 1135 1136 /* 1137 * If read is satisfied by single OSD request, 1138 * it can pass EOF. Otherwise read is within 1139 * i_size. 1140 */ 1141 if (aio_req->num_reqs == 1) { 1142 loff_t i_size = i_size_read(inode); 1143 loff_t endoff = aio_req->iocb->ki_pos + rc; 1144 if (endoff < i_size) 1145 zlen = min_t(size_t, zlen, 1146 i_size - endoff); 1147 aio_req->total_len = rc + zlen; 1148 } 1149 1150 iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs, 1151 osd_data->num_bvecs, len); 1152 iov_iter_advance(&i, rc); 1153 iov_iter_zero(zlen, &i); 1154 } 1155 } 1156 1157 /* r_start_latency == 0 means the request was not submitted */ 1158 if (req->r_start_latency) { 1159 if (aio_req->write) 1160 ceph_update_write_metrics(metric, req->r_start_latency, 1161 req->r_end_latency, len, rc); 1162 else 1163 ceph_update_read_metrics(metric, req->r_start_latency, 1164 req->r_end_latency, len, rc); 1165 } 1166 1167 put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs, 1168 aio_req->should_dirty); 1169 ceph_osdc_put_request(req); 1170 1171 if (rc < 0) 1172 cmpxchg(&aio_req->error, 0, rc); 1173 1174 ceph_aio_complete(inode, aio_req); 1175 return; 1176 } 1177 1178 static void ceph_aio_retry_work(struct work_struct *work) 1179 { 1180 struct ceph_aio_work *aio_work = 1181 container_of(work, struct ceph_aio_work, work); 1182 struct ceph_osd_request *orig_req = aio_work->req; 1183 struct ceph_aio_request *aio_req = orig_req->r_priv; 1184 struct inode *inode = orig_req->r_inode; 1185 struct ceph_inode_info *ci = ceph_inode(inode); 1186 struct ceph_snap_context *snapc; 1187 struct ceph_osd_request *req; 1188 int ret; 1189 1190 spin_lock(&ci->i_ceph_lock); 1191 if (__ceph_have_pending_cap_snap(ci)) { 1192 struct ceph_cap_snap *capsnap = 1193 list_last_entry(&ci->i_cap_snaps, 1194 struct ceph_cap_snap, 1195 ci_item); 1196 snapc = ceph_get_snap_context(capsnap->context); 1197 } else { 1198 BUG_ON(!ci->i_head_snapc); 1199 snapc = ceph_get_snap_context(ci->i_head_snapc); 1200 } 1201 spin_unlock(&ci->i_ceph_lock); 1202 1203 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1, 1204 false, GFP_NOFS); 1205 if (!req) { 1206 ret = -ENOMEM; 1207 req = orig_req; 1208 goto out; 1209 } 1210 1211 req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 1212 ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc); 1213 ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid); 1214 1215 req->r_ops[0] = orig_req->r_ops[0]; 1216 1217 req->r_mtime = aio_req->mtime; 1218 req->r_data_offset = req->r_ops[0].extent.offset; 1219 1220 ret = ceph_osdc_alloc_messages(req, GFP_NOFS); 1221 if (ret) { 1222 ceph_osdc_put_request(req); 1223 req = orig_req; 1224 goto out; 1225 } 1226 1227 ceph_osdc_put_request(orig_req); 1228 1229 req->r_callback = ceph_aio_complete_req; 1230 req->r_inode = inode; 1231 req->r_priv = aio_req; 1232 1233 ret = ceph_osdc_start_request(req->r_osdc, req, false); 1234 out: 1235 if (ret < 0) { 1236 req->r_result = ret; 1237 ceph_aio_complete_req(req); 1238 } 1239 1240 ceph_put_snap_context(snapc); 1241 kfree(aio_work); 1242 } 1243 1244 static ssize_t 1245 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, 1246 struct ceph_snap_context *snapc, 1247 struct ceph_cap_flush **pcf) 1248 { 1249 struct file *file = iocb->ki_filp; 1250 struct inode *inode = file_inode(file); 1251 struct ceph_inode_info *ci = ceph_inode(inode); 1252 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1253 struct ceph_client_metric *metric = &fsc->mdsc->metric; 1254 struct ceph_vino vino; 1255 struct ceph_osd_request *req; 1256 struct bio_vec *bvecs; 1257 struct ceph_aio_request *aio_req = NULL; 1258 int num_pages = 0; 1259 int flags; 1260 int ret = 0; 1261 struct timespec64 mtime = current_time(inode); 1262 size_t count = iov_iter_count(iter); 1263 loff_t pos = iocb->ki_pos; 1264 bool write = iov_iter_rw(iter) == WRITE; 1265 bool should_dirty = !write && iter_is_iovec(iter); 1266 1267 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1268 return -EROFS; 1269 1270 dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n", 1271 (write ? "write" : "read"), file, pos, (unsigned)count, 1272 snapc, snapc ? snapc->seq : 0); 1273 1274 if (write) { 1275 int ret2; 1276 1277 ceph_fscache_invalidate(inode, true); 1278 1279 ret2 = invalidate_inode_pages2_range(inode->i_mapping, 1280 pos >> PAGE_SHIFT, 1281 (pos + count - 1) >> PAGE_SHIFT); 1282 if (ret2 < 0) 1283 dout("invalidate_inode_pages2_range returned %d\n", ret2); 1284 1285 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 1286 } else { 1287 flags = CEPH_OSD_FLAG_READ; 1288 } 1289 1290 while (iov_iter_count(iter) > 0) { 1291 u64 size = iov_iter_count(iter); 1292 ssize_t len; 1293 1294 if (write) 1295 size = min_t(u64, size, fsc->mount_options->wsize); 1296 else 1297 size = min_t(u64, size, fsc->mount_options->rsize); 1298 1299 vino = ceph_vino(inode); 1300 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1301 vino, pos, &size, 0, 1302 1, 1303 write ? CEPH_OSD_OP_WRITE : 1304 CEPH_OSD_OP_READ, 1305 flags, snapc, 1306 ci->i_truncate_seq, 1307 ci->i_truncate_size, 1308 false); 1309 if (IS_ERR(req)) { 1310 ret = PTR_ERR(req); 1311 break; 1312 } 1313 1314 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages); 1315 if (len < 0) { 1316 ceph_osdc_put_request(req); 1317 ret = len; 1318 break; 1319 } 1320 if (len != size) 1321 osd_req_op_extent_update(req, 0, len); 1322 1323 /* 1324 * To simplify error handling, allow AIO when IO within i_size 1325 * or IO can be satisfied by single OSD request. 1326 */ 1327 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && 1328 (len == count || pos + count <= i_size_read(inode))) { 1329 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); 1330 if (aio_req) { 1331 aio_req->iocb = iocb; 1332 aio_req->write = write; 1333 aio_req->should_dirty = should_dirty; 1334 INIT_LIST_HEAD(&aio_req->osd_reqs); 1335 if (write) { 1336 aio_req->mtime = mtime; 1337 swap(aio_req->prealloc_cf, *pcf); 1338 } 1339 } 1340 /* ignore error */ 1341 } 1342 1343 if (write) { 1344 /* 1345 * throw out any page cache pages in this range. this 1346 * may block. 1347 */ 1348 truncate_inode_pages_range(inode->i_mapping, pos, 1349 PAGE_ALIGN(pos + len) - 1); 1350 1351 req->r_mtime = mtime; 1352 } 1353 1354 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len); 1355 1356 if (aio_req) { 1357 aio_req->total_len += len; 1358 aio_req->num_reqs++; 1359 atomic_inc(&aio_req->pending_reqs); 1360 1361 req->r_callback = ceph_aio_complete_req; 1362 req->r_inode = inode; 1363 req->r_priv = aio_req; 1364 list_add_tail(&req->r_private_item, &aio_req->osd_reqs); 1365 1366 pos += len; 1367 continue; 1368 } 1369 1370 ret = ceph_osdc_start_request(req->r_osdc, req, false); 1371 if (!ret) 1372 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1373 1374 if (write) 1375 ceph_update_write_metrics(metric, req->r_start_latency, 1376 req->r_end_latency, len, ret); 1377 else 1378 ceph_update_read_metrics(metric, req->r_start_latency, 1379 req->r_end_latency, len, ret); 1380 1381 size = i_size_read(inode); 1382 if (!write) { 1383 if (ret == -ENOENT) 1384 ret = 0; 1385 if (ret >= 0 && ret < len && pos + ret < size) { 1386 struct iov_iter i; 1387 int zlen = min_t(size_t, len - ret, 1388 size - pos - ret); 1389 1390 iov_iter_bvec(&i, READ, bvecs, num_pages, len); 1391 iov_iter_advance(&i, ret); 1392 iov_iter_zero(zlen, &i); 1393 ret += zlen; 1394 } 1395 if (ret >= 0) 1396 len = ret; 1397 } 1398 1399 put_bvecs(bvecs, num_pages, should_dirty); 1400 ceph_osdc_put_request(req); 1401 if (ret < 0) 1402 break; 1403 1404 pos += len; 1405 if (!write && pos >= size) 1406 break; 1407 1408 if (write && pos > size) { 1409 if (ceph_inode_set_size(inode, pos)) 1410 ceph_check_caps(ceph_inode(inode), 1411 CHECK_CAPS_AUTHONLY, 1412 NULL); 1413 } 1414 } 1415 1416 if (aio_req) { 1417 LIST_HEAD(osd_reqs); 1418 1419 if (aio_req->num_reqs == 0) { 1420 kfree(aio_req); 1421 return ret; 1422 } 1423 1424 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : 1425 CEPH_CAP_FILE_RD); 1426 1427 list_splice(&aio_req->osd_reqs, &osd_reqs); 1428 inode_dio_begin(inode); 1429 while (!list_empty(&osd_reqs)) { 1430 req = list_first_entry(&osd_reqs, 1431 struct ceph_osd_request, 1432 r_private_item); 1433 list_del_init(&req->r_private_item); 1434 if (ret >= 0) 1435 ret = ceph_osdc_start_request(req->r_osdc, 1436 req, false); 1437 if (ret < 0) { 1438 req->r_result = ret; 1439 ceph_aio_complete_req(req); 1440 } 1441 } 1442 return -EIOCBQUEUED; 1443 } 1444 1445 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { 1446 ret = pos - iocb->ki_pos; 1447 iocb->ki_pos = pos; 1448 } 1449 return ret; 1450 } 1451 1452 /* 1453 * Synchronous write, straight from __user pointer or user pages. 1454 * 1455 * If write spans object boundary, just do multiple writes. (For a 1456 * correct atomic write, we should e.g. take write locks on all 1457 * objects, rollback on failure, etc.) 1458 */ 1459 static ssize_t 1460 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, 1461 struct ceph_snap_context *snapc) 1462 { 1463 struct file *file = iocb->ki_filp; 1464 struct inode *inode = file_inode(file); 1465 struct ceph_inode_info *ci = ceph_inode(inode); 1466 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1467 struct ceph_vino vino; 1468 struct ceph_osd_request *req; 1469 struct page **pages; 1470 u64 len; 1471 int num_pages; 1472 int written = 0; 1473 int flags; 1474 int ret; 1475 bool check_caps = false; 1476 struct timespec64 mtime = current_time(inode); 1477 size_t count = iov_iter_count(from); 1478 1479 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1480 return -EROFS; 1481 1482 dout("sync_write on file %p %lld~%u snapc %p seq %lld\n", 1483 file, pos, (unsigned)count, snapc, snapc->seq); 1484 1485 ret = filemap_write_and_wait_range(inode->i_mapping, 1486 pos, pos + count - 1); 1487 if (ret < 0) 1488 return ret; 1489 1490 ceph_fscache_invalidate(inode, false); 1491 ret = invalidate_inode_pages2_range(inode->i_mapping, 1492 pos >> PAGE_SHIFT, 1493 (pos + count - 1) >> PAGE_SHIFT); 1494 if (ret < 0) 1495 dout("invalidate_inode_pages2_range returned %d\n", ret); 1496 1497 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 1498 1499 while ((len = iov_iter_count(from)) > 0) { 1500 size_t left; 1501 int n; 1502 1503 vino = ceph_vino(inode); 1504 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1505 vino, pos, &len, 0, 1, 1506 CEPH_OSD_OP_WRITE, flags, snapc, 1507 ci->i_truncate_seq, 1508 ci->i_truncate_size, 1509 false); 1510 if (IS_ERR(req)) { 1511 ret = PTR_ERR(req); 1512 break; 1513 } 1514 1515 /* 1516 * write from beginning of first page, 1517 * regardless of io alignment 1518 */ 1519 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1520 1521 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1522 if (IS_ERR(pages)) { 1523 ret = PTR_ERR(pages); 1524 goto out; 1525 } 1526 1527 left = len; 1528 for (n = 0; n < num_pages; n++) { 1529 size_t plen = min_t(size_t, left, PAGE_SIZE); 1530 ret = copy_page_from_iter(pages[n], 0, plen, from); 1531 if (ret != plen) { 1532 ret = -EFAULT; 1533 break; 1534 } 1535 left -= ret; 1536 } 1537 1538 if (ret < 0) { 1539 ceph_release_page_vector(pages, num_pages); 1540 goto out; 1541 } 1542 1543 req->r_inode = inode; 1544 1545 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, 1546 false, true); 1547 1548 req->r_mtime = mtime; 1549 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1550 if (!ret) 1551 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1552 1553 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 1554 req->r_end_latency, len, ret); 1555 out: 1556 ceph_osdc_put_request(req); 1557 if (ret != 0) { 1558 ceph_set_error_write(ci); 1559 break; 1560 } 1561 1562 ceph_clear_error_write(ci); 1563 pos += len; 1564 written += len; 1565 if (pos > i_size_read(inode)) { 1566 check_caps = ceph_inode_set_size(inode, pos); 1567 if (check_caps) 1568 ceph_check_caps(ceph_inode(inode), 1569 CHECK_CAPS_AUTHONLY, 1570 NULL); 1571 } 1572 1573 } 1574 1575 if (ret != -EOLDSNAPC && written > 0) { 1576 ret = written; 1577 iocb->ki_pos = pos; 1578 } 1579 return ret; 1580 } 1581 1582 /* 1583 * Wrap generic_file_aio_read with checks for cap bits on the inode. 1584 * Atomically grab references, so that those bits are not released 1585 * back to the MDS mid-read. 1586 * 1587 * Hmm, the sync read case isn't actually async... should it be? 1588 */ 1589 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) 1590 { 1591 struct file *filp = iocb->ki_filp; 1592 struct ceph_file_info *fi = filp->private_data; 1593 size_t len = iov_iter_count(to); 1594 struct inode *inode = file_inode(filp); 1595 struct ceph_inode_info *ci = ceph_inode(inode); 1596 bool direct_lock = iocb->ki_flags & IOCB_DIRECT; 1597 ssize_t ret; 1598 int want = 0, got = 0; 1599 int retry_op = 0, read = 0; 1600 1601 again: 1602 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 1603 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); 1604 1605 if (ceph_inode_is_shutdown(inode)) 1606 return -ESTALE; 1607 1608 if (direct_lock) 1609 ceph_start_io_direct(inode); 1610 else 1611 ceph_start_io_read(inode); 1612 1613 if (!(fi->flags & CEPH_F_SYNC) && !direct_lock) 1614 want |= CEPH_CAP_FILE_CACHE; 1615 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1616 want |= CEPH_CAP_FILE_LAZYIO; 1617 1618 ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got); 1619 if (ret < 0) { 1620 if (direct_lock) 1621 ceph_end_io_direct(inode); 1622 else 1623 ceph_end_io_read(inode); 1624 return ret; 1625 } 1626 1627 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || 1628 (iocb->ki_flags & IOCB_DIRECT) || 1629 (fi->flags & CEPH_F_SYNC)) { 1630 1631 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1632 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1633 ceph_cap_string(got)); 1634 1635 if (ci->i_inline_version == CEPH_INLINE_NONE) { 1636 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { 1637 ret = ceph_direct_read_write(iocb, to, 1638 NULL, NULL); 1639 if (ret >= 0 && ret < len) 1640 retry_op = CHECK_EOF; 1641 } else { 1642 ret = ceph_sync_read(iocb, to, &retry_op); 1643 } 1644 } else { 1645 retry_op = READ_INLINE; 1646 } 1647 } else { 1648 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got); 1649 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1650 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1651 ceph_cap_string(got)); 1652 ceph_add_rw_context(fi, &rw_ctx); 1653 ret = generic_file_read_iter(iocb, to); 1654 ceph_del_rw_context(fi, &rw_ctx); 1655 } 1656 1657 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1658 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1659 ceph_put_cap_refs(ci, got); 1660 1661 if (direct_lock) 1662 ceph_end_io_direct(inode); 1663 else 1664 ceph_end_io_read(inode); 1665 1666 if (retry_op > HAVE_RETRIED && ret >= 0) { 1667 int statret; 1668 struct page *page = NULL; 1669 loff_t i_size; 1670 if (retry_op == READ_INLINE) { 1671 page = __page_cache_alloc(GFP_KERNEL); 1672 if (!page) 1673 return -ENOMEM; 1674 } 1675 1676 statret = __ceph_do_getattr(inode, page, 1677 CEPH_STAT_CAP_INLINE_DATA, !!page); 1678 if (statret < 0) { 1679 if (page) 1680 __free_page(page); 1681 if (statret == -ENODATA) { 1682 BUG_ON(retry_op != READ_INLINE); 1683 goto again; 1684 } 1685 return statret; 1686 } 1687 1688 i_size = i_size_read(inode); 1689 if (retry_op == READ_INLINE) { 1690 BUG_ON(ret > 0 || read > 0); 1691 if (iocb->ki_pos < i_size && 1692 iocb->ki_pos < PAGE_SIZE) { 1693 loff_t end = min_t(loff_t, i_size, 1694 iocb->ki_pos + len); 1695 end = min_t(loff_t, end, PAGE_SIZE); 1696 if (statret < end) 1697 zero_user_segment(page, statret, end); 1698 ret = copy_page_to_iter(page, 1699 iocb->ki_pos & ~PAGE_MASK, 1700 end - iocb->ki_pos, to); 1701 iocb->ki_pos += ret; 1702 read += ret; 1703 } 1704 if (iocb->ki_pos < i_size && read < len) { 1705 size_t zlen = min_t(size_t, len - read, 1706 i_size - iocb->ki_pos); 1707 ret = iov_iter_zero(zlen, to); 1708 iocb->ki_pos += ret; 1709 read += ret; 1710 } 1711 __free_pages(page, 0); 1712 return read; 1713 } 1714 1715 /* hit EOF or hole? */ 1716 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && 1717 ret < len) { 1718 dout("sync_read hit hole, ppos %lld < size %lld" 1719 ", reading more\n", iocb->ki_pos, i_size); 1720 1721 read += ret; 1722 len -= ret; 1723 retry_op = HAVE_RETRIED; 1724 goto again; 1725 } 1726 } 1727 1728 if (ret >= 0) 1729 ret += read; 1730 1731 return ret; 1732 } 1733 1734 /* 1735 * Take cap references to avoid releasing caps to MDS mid-write. 1736 * 1737 * If we are synchronous, and write with an old snap context, the OSD 1738 * may return EOLDSNAPC. In that case, retry the write.. _after_ 1739 * dropping our cap refs and allowing the pending snap to logically 1740 * complete _before_ this write occurs. 1741 * 1742 * If we are near ENOSPC, write synchronously. 1743 */ 1744 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) 1745 { 1746 struct file *file = iocb->ki_filp; 1747 struct ceph_file_info *fi = file->private_data; 1748 struct inode *inode = file_inode(file); 1749 struct ceph_inode_info *ci = ceph_inode(inode); 1750 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1751 struct ceph_osd_client *osdc = &fsc->client->osdc; 1752 struct ceph_cap_flush *prealloc_cf; 1753 ssize_t count, written = 0; 1754 int err, want = 0, got; 1755 bool direct_lock = false; 1756 u32 map_flags; 1757 u64 pool_flags; 1758 loff_t pos; 1759 loff_t limit = max(i_size_read(inode), fsc->max_file_size); 1760 1761 if (ceph_inode_is_shutdown(inode)) 1762 return -ESTALE; 1763 1764 if (ceph_snap(inode) != CEPH_NOSNAP) 1765 return -EROFS; 1766 1767 prealloc_cf = ceph_alloc_cap_flush(); 1768 if (!prealloc_cf) 1769 return -ENOMEM; 1770 1771 if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT) 1772 direct_lock = true; 1773 1774 retry_snap: 1775 if (direct_lock) 1776 ceph_start_io_direct(inode); 1777 else 1778 ceph_start_io_write(inode); 1779 1780 /* We can write back this queue in page reclaim */ 1781 current->backing_dev_info = inode_to_bdi(inode); 1782 1783 if (iocb->ki_flags & IOCB_APPEND) { 1784 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1785 if (err < 0) 1786 goto out; 1787 } 1788 1789 err = generic_write_checks(iocb, from); 1790 if (err <= 0) 1791 goto out; 1792 1793 pos = iocb->ki_pos; 1794 if (unlikely(pos >= limit)) { 1795 err = -EFBIG; 1796 goto out; 1797 } else { 1798 iov_iter_truncate(from, limit - pos); 1799 } 1800 1801 count = iov_iter_count(from); 1802 if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) { 1803 err = -EDQUOT; 1804 goto out; 1805 } 1806 1807 down_read(&osdc->lock); 1808 map_flags = osdc->osdmap->flags; 1809 pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id); 1810 up_read(&osdc->lock); 1811 if ((map_flags & CEPH_OSDMAP_FULL) || 1812 (pool_flags & CEPH_POOL_FLAG_FULL)) { 1813 err = -ENOSPC; 1814 goto out; 1815 } 1816 1817 err = file_remove_privs(file); 1818 if (err) 1819 goto out; 1820 1821 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 1822 inode, ceph_vinop(inode), pos, count, i_size_read(inode)); 1823 if (!(fi->flags & CEPH_F_SYNC) && !direct_lock) 1824 want |= CEPH_CAP_FILE_BUFFER; 1825 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1826 want |= CEPH_CAP_FILE_LAZYIO; 1827 got = 0; 1828 err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got); 1829 if (err < 0) 1830 goto out; 1831 1832 err = file_update_time(file); 1833 if (err) 1834 goto out_caps; 1835 1836 inode_inc_iversion_raw(inode); 1837 1838 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 1839 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 1840 1841 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 1842 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) || 1843 (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) { 1844 struct ceph_snap_context *snapc; 1845 struct iov_iter data; 1846 1847 spin_lock(&ci->i_ceph_lock); 1848 if (__ceph_have_pending_cap_snap(ci)) { 1849 struct ceph_cap_snap *capsnap = 1850 list_last_entry(&ci->i_cap_snaps, 1851 struct ceph_cap_snap, 1852 ci_item); 1853 snapc = ceph_get_snap_context(capsnap->context); 1854 } else { 1855 BUG_ON(!ci->i_head_snapc); 1856 snapc = ceph_get_snap_context(ci->i_head_snapc); 1857 } 1858 spin_unlock(&ci->i_ceph_lock); 1859 1860 /* we might need to revert back to that point */ 1861 data = *from; 1862 if (iocb->ki_flags & IOCB_DIRECT) 1863 written = ceph_direct_read_write(iocb, &data, snapc, 1864 &prealloc_cf); 1865 else 1866 written = ceph_sync_write(iocb, &data, pos, snapc); 1867 if (direct_lock) 1868 ceph_end_io_direct(inode); 1869 else 1870 ceph_end_io_write(inode); 1871 if (written > 0) 1872 iov_iter_advance(from, written); 1873 ceph_put_snap_context(snapc); 1874 } else { 1875 /* 1876 * No need to acquire the i_truncate_mutex. Because 1877 * the MDS revokes Fwb caps before sending truncate 1878 * message to us. We can't get Fwb cap while there 1879 * are pending vmtruncate. So write and vmtruncate 1880 * can not run at the same time 1881 */ 1882 written = generic_perform_write(iocb, from); 1883 if (likely(written >= 0)) 1884 iocb->ki_pos = pos + written; 1885 ceph_end_io_write(inode); 1886 } 1887 1888 if (written >= 0) { 1889 int dirty; 1890 1891 spin_lock(&ci->i_ceph_lock); 1892 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1893 &prealloc_cf); 1894 spin_unlock(&ci->i_ceph_lock); 1895 if (dirty) 1896 __mark_inode_dirty(inode, dirty); 1897 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos)) 1898 ceph_check_caps(ci, 0, NULL); 1899 } 1900 1901 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 1902 inode, ceph_vinop(inode), pos, (unsigned)count, 1903 ceph_cap_string(got)); 1904 ceph_put_cap_refs(ci, got); 1905 1906 if (written == -EOLDSNAPC) { 1907 dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n", 1908 inode, ceph_vinop(inode), pos, (unsigned)count); 1909 goto retry_snap; 1910 } 1911 1912 if (written >= 0) { 1913 if ((map_flags & CEPH_OSDMAP_NEARFULL) || 1914 (pool_flags & CEPH_POOL_FLAG_NEARFULL)) 1915 iocb->ki_flags |= IOCB_DSYNC; 1916 written = generic_write_sync(iocb, written); 1917 } 1918 1919 goto out_unlocked; 1920 out_caps: 1921 ceph_put_cap_refs(ci, got); 1922 out: 1923 if (direct_lock) 1924 ceph_end_io_direct(inode); 1925 else 1926 ceph_end_io_write(inode); 1927 out_unlocked: 1928 ceph_free_cap_flush(prealloc_cf); 1929 current->backing_dev_info = NULL; 1930 return written ? written : err; 1931 } 1932 1933 /* 1934 * llseek. be sure to verify file size on SEEK_END. 1935 */ 1936 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) 1937 { 1938 struct inode *inode = file->f_mapping->host; 1939 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1940 loff_t i_size; 1941 loff_t ret; 1942 1943 inode_lock(inode); 1944 1945 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { 1946 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1947 if (ret < 0) 1948 goto out; 1949 } 1950 1951 i_size = i_size_read(inode); 1952 switch (whence) { 1953 case SEEK_END: 1954 offset += i_size; 1955 break; 1956 case SEEK_CUR: 1957 /* 1958 * Here we special-case the lseek(fd, 0, SEEK_CUR) 1959 * position-querying operation. Avoid rewriting the "same" 1960 * f_pos value back to the file because a concurrent read(), 1961 * write() or lseek() might have altered it 1962 */ 1963 if (offset == 0) { 1964 ret = file->f_pos; 1965 goto out; 1966 } 1967 offset += file->f_pos; 1968 break; 1969 case SEEK_DATA: 1970 if (offset < 0 || offset >= i_size) { 1971 ret = -ENXIO; 1972 goto out; 1973 } 1974 break; 1975 case SEEK_HOLE: 1976 if (offset < 0 || offset >= i_size) { 1977 ret = -ENXIO; 1978 goto out; 1979 } 1980 offset = i_size; 1981 break; 1982 } 1983 1984 ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size)); 1985 1986 out: 1987 inode_unlock(inode); 1988 return ret; 1989 } 1990 1991 static inline void ceph_zero_partial_page( 1992 struct inode *inode, loff_t offset, unsigned size) 1993 { 1994 struct page *page; 1995 pgoff_t index = offset >> PAGE_SHIFT; 1996 1997 page = find_lock_page(inode->i_mapping, index); 1998 if (page) { 1999 wait_on_page_writeback(page); 2000 zero_user(page, offset & (PAGE_SIZE - 1), size); 2001 unlock_page(page); 2002 put_page(page); 2003 } 2004 } 2005 2006 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 2007 loff_t length) 2008 { 2009 loff_t nearly = round_up(offset, PAGE_SIZE); 2010 if (offset < nearly) { 2011 loff_t size = nearly - offset; 2012 if (length < size) 2013 size = length; 2014 ceph_zero_partial_page(inode, offset, size); 2015 offset += size; 2016 length -= size; 2017 } 2018 if (length >= PAGE_SIZE) { 2019 loff_t size = round_down(length, PAGE_SIZE); 2020 truncate_pagecache_range(inode, offset, offset + size - 1); 2021 offset += size; 2022 length -= size; 2023 } 2024 if (length) 2025 ceph_zero_partial_page(inode, offset, length); 2026 } 2027 2028 static int ceph_zero_partial_object(struct inode *inode, 2029 loff_t offset, loff_t *length) 2030 { 2031 struct ceph_inode_info *ci = ceph_inode(inode); 2032 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 2033 struct ceph_osd_request *req; 2034 int ret = 0; 2035 loff_t zero = 0; 2036 int op; 2037 2038 if (!length) { 2039 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; 2040 length = &zero; 2041 } else { 2042 op = CEPH_OSD_OP_ZERO; 2043 } 2044 2045 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 2046 ceph_vino(inode), 2047 offset, length, 2048 0, 1, op, 2049 CEPH_OSD_FLAG_WRITE, 2050 NULL, 0, 0, false); 2051 if (IS_ERR(req)) { 2052 ret = PTR_ERR(req); 2053 goto out; 2054 } 2055 2056 req->r_mtime = inode->i_mtime; 2057 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 2058 if (!ret) { 2059 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 2060 if (ret == -ENOENT) 2061 ret = 0; 2062 } 2063 ceph_osdc_put_request(req); 2064 2065 out: 2066 return ret; 2067 } 2068 2069 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) 2070 { 2071 int ret = 0; 2072 struct ceph_inode_info *ci = ceph_inode(inode); 2073 s32 stripe_unit = ci->i_layout.stripe_unit; 2074 s32 stripe_count = ci->i_layout.stripe_count; 2075 s32 object_size = ci->i_layout.object_size; 2076 u64 object_set_size = object_size * stripe_count; 2077 u64 nearly, t; 2078 2079 /* round offset up to next period boundary */ 2080 nearly = offset + object_set_size - 1; 2081 t = nearly; 2082 nearly -= do_div(t, object_set_size); 2083 2084 while (length && offset < nearly) { 2085 loff_t size = length; 2086 ret = ceph_zero_partial_object(inode, offset, &size); 2087 if (ret < 0) 2088 return ret; 2089 offset += size; 2090 length -= size; 2091 } 2092 while (length >= object_set_size) { 2093 int i; 2094 loff_t pos = offset; 2095 for (i = 0; i < stripe_count; ++i) { 2096 ret = ceph_zero_partial_object(inode, pos, NULL); 2097 if (ret < 0) 2098 return ret; 2099 pos += stripe_unit; 2100 } 2101 offset += object_set_size; 2102 length -= object_set_size; 2103 } 2104 while (length) { 2105 loff_t size = length; 2106 ret = ceph_zero_partial_object(inode, offset, &size); 2107 if (ret < 0) 2108 return ret; 2109 offset += size; 2110 length -= size; 2111 } 2112 return ret; 2113 } 2114 2115 static long ceph_fallocate(struct file *file, int mode, 2116 loff_t offset, loff_t length) 2117 { 2118 struct ceph_file_info *fi = file->private_data; 2119 struct inode *inode = file_inode(file); 2120 struct ceph_inode_info *ci = ceph_inode(inode); 2121 struct ceph_cap_flush *prealloc_cf; 2122 int want, got = 0; 2123 int dirty; 2124 int ret = 0; 2125 loff_t endoff = 0; 2126 loff_t size; 2127 2128 if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 2129 return -EOPNOTSUPP; 2130 2131 if (!S_ISREG(inode->i_mode)) 2132 return -EOPNOTSUPP; 2133 2134 prealloc_cf = ceph_alloc_cap_flush(); 2135 if (!prealloc_cf) 2136 return -ENOMEM; 2137 2138 inode_lock(inode); 2139 2140 if (ceph_snap(inode) != CEPH_NOSNAP) { 2141 ret = -EROFS; 2142 goto unlock; 2143 } 2144 2145 size = i_size_read(inode); 2146 2147 /* Are we punching a hole beyond EOF? */ 2148 if (offset >= size) 2149 goto unlock; 2150 if ((offset + length) > size) 2151 length = size - offset; 2152 2153 if (fi->fmode & CEPH_FILE_MODE_LAZY) 2154 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 2155 else 2156 want = CEPH_CAP_FILE_BUFFER; 2157 2158 ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got); 2159 if (ret < 0) 2160 goto unlock; 2161 2162 filemap_invalidate_lock(inode->i_mapping); 2163 ceph_fscache_invalidate(inode, false); 2164 ceph_zero_pagecache_range(inode, offset, length); 2165 ret = ceph_zero_objects(inode, offset, length); 2166 2167 if (!ret) { 2168 spin_lock(&ci->i_ceph_lock); 2169 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 2170 &prealloc_cf); 2171 spin_unlock(&ci->i_ceph_lock); 2172 if (dirty) 2173 __mark_inode_dirty(inode, dirty); 2174 } 2175 filemap_invalidate_unlock(inode->i_mapping); 2176 2177 ceph_put_cap_refs(ci, got); 2178 unlock: 2179 inode_unlock(inode); 2180 ceph_free_cap_flush(prealloc_cf); 2181 return ret; 2182 } 2183 2184 /* 2185 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for 2186 * src_ci. Two attempts are made to obtain both caps, and an error is return if 2187 * this fails; zero is returned on success. 2188 */ 2189 static int get_rd_wr_caps(struct file *src_filp, int *src_got, 2190 struct file *dst_filp, 2191 loff_t dst_endoff, int *dst_got) 2192 { 2193 int ret = 0; 2194 bool retrying = false; 2195 2196 retry_caps: 2197 ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER, 2198 dst_endoff, dst_got); 2199 if (ret < 0) 2200 return ret; 2201 2202 /* 2203 * Since we're already holding the FILE_WR capability for the dst file, 2204 * we would risk a deadlock by using ceph_get_caps. Thus, we'll do some 2205 * retry dance instead to try to get both capabilities. 2206 */ 2207 ret = ceph_try_get_caps(file_inode(src_filp), 2208 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED, 2209 false, src_got); 2210 if (ret <= 0) { 2211 /* Start by dropping dst_ci caps and getting src_ci caps */ 2212 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got); 2213 if (retrying) { 2214 if (!ret) 2215 /* ceph_try_get_caps masks EAGAIN */ 2216 ret = -EAGAIN; 2217 return ret; 2218 } 2219 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD, 2220 CEPH_CAP_FILE_SHARED, -1, src_got); 2221 if (ret < 0) 2222 return ret; 2223 /*... drop src_ci caps too, and retry */ 2224 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got); 2225 retrying = true; 2226 goto retry_caps; 2227 } 2228 return ret; 2229 } 2230 2231 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got, 2232 struct ceph_inode_info *dst_ci, int dst_got) 2233 { 2234 ceph_put_cap_refs(src_ci, src_got); 2235 ceph_put_cap_refs(dst_ci, dst_got); 2236 } 2237 2238 /* 2239 * This function does several size-related checks, returning an error if: 2240 * - source file is smaller than off+len 2241 * - destination file size is not OK (inode_newsize_ok()) 2242 * - max bytes quotas is exceeded 2243 */ 2244 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode, 2245 loff_t src_off, loff_t dst_off, size_t len) 2246 { 2247 loff_t size, endoff; 2248 2249 size = i_size_read(src_inode); 2250 /* 2251 * Don't copy beyond source file EOF. Instead of simply setting length 2252 * to (size - src_off), just drop to VFS default implementation, as the 2253 * local i_size may be stale due to other clients writing to the source 2254 * inode. 2255 */ 2256 if (src_off + len > size) { 2257 dout("Copy beyond EOF (%llu + %zu > %llu)\n", 2258 src_off, len, size); 2259 return -EOPNOTSUPP; 2260 } 2261 size = i_size_read(dst_inode); 2262 2263 endoff = dst_off + len; 2264 if (inode_newsize_ok(dst_inode, endoff)) 2265 return -EOPNOTSUPP; 2266 2267 if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff)) 2268 return -EDQUOT; 2269 2270 return 0; 2271 } 2272 2273 static struct ceph_osd_request * 2274 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc, 2275 u64 src_snapid, 2276 struct ceph_object_id *src_oid, 2277 struct ceph_object_locator *src_oloc, 2278 struct ceph_object_id *dst_oid, 2279 struct ceph_object_locator *dst_oloc, 2280 u32 truncate_seq, u64 truncate_size) 2281 { 2282 struct ceph_osd_request *req; 2283 int ret; 2284 u32 src_fadvise_flags = 2285 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL | 2286 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE; 2287 u32 dst_fadvise_flags = 2288 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL | 2289 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED; 2290 2291 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); 2292 if (!req) 2293 return ERR_PTR(-ENOMEM); 2294 2295 req->r_flags = CEPH_OSD_FLAG_WRITE; 2296 2297 ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc); 2298 ceph_oid_copy(&req->r_t.base_oid, dst_oid); 2299 2300 ret = osd_req_op_copy_from_init(req, src_snapid, 0, 2301 src_oid, src_oloc, 2302 src_fadvise_flags, 2303 dst_fadvise_flags, 2304 truncate_seq, 2305 truncate_size, 2306 CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ); 2307 if (ret) 2308 goto out; 2309 2310 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); 2311 if (ret) 2312 goto out; 2313 2314 return req; 2315 2316 out: 2317 ceph_osdc_put_request(req); 2318 return ERR_PTR(ret); 2319 } 2320 2321 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off, 2322 struct ceph_inode_info *dst_ci, u64 *dst_off, 2323 struct ceph_fs_client *fsc, 2324 size_t len, unsigned int flags) 2325 { 2326 struct ceph_object_locator src_oloc, dst_oloc; 2327 struct ceph_object_id src_oid, dst_oid; 2328 struct ceph_osd_client *osdc; 2329 struct ceph_osd_request *req; 2330 size_t bytes = 0; 2331 u64 src_objnum, src_objoff, dst_objnum, dst_objoff; 2332 u32 src_objlen, dst_objlen; 2333 u32 object_size = src_ci->i_layout.object_size; 2334 int ret; 2335 2336 src_oloc.pool = src_ci->i_layout.pool_id; 2337 src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns); 2338 dst_oloc.pool = dst_ci->i_layout.pool_id; 2339 dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns); 2340 osdc = &fsc->client->osdc; 2341 2342 while (len >= object_size) { 2343 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off, 2344 object_size, &src_objnum, 2345 &src_objoff, &src_objlen); 2346 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off, 2347 object_size, &dst_objnum, 2348 &dst_objoff, &dst_objlen); 2349 ceph_oid_init(&src_oid); 2350 ceph_oid_printf(&src_oid, "%llx.%08llx", 2351 src_ci->i_vino.ino, src_objnum); 2352 ceph_oid_init(&dst_oid); 2353 ceph_oid_printf(&dst_oid, "%llx.%08llx", 2354 dst_ci->i_vino.ino, dst_objnum); 2355 /* Do an object remote copy */ 2356 req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap, 2357 &src_oid, &src_oloc, 2358 &dst_oid, &dst_oloc, 2359 dst_ci->i_truncate_seq, 2360 dst_ci->i_truncate_size); 2361 if (IS_ERR(req)) 2362 ret = PTR_ERR(req); 2363 else { 2364 ceph_osdc_start_request(osdc, req, false); 2365 ret = ceph_osdc_wait_request(osdc, req); 2366 ceph_update_copyfrom_metrics(&fsc->mdsc->metric, 2367 req->r_start_latency, 2368 req->r_end_latency, 2369 object_size, ret); 2370 ceph_osdc_put_request(req); 2371 } 2372 if (ret) { 2373 if (ret == -EOPNOTSUPP) { 2374 fsc->have_copy_from2 = false; 2375 pr_notice("OSDs don't support copy-from2; disabling copy offload\n"); 2376 } 2377 dout("ceph_osdc_copy_from returned %d\n", ret); 2378 if (!bytes) 2379 bytes = ret; 2380 goto out; 2381 } 2382 len -= object_size; 2383 bytes += object_size; 2384 *src_off += object_size; 2385 *dst_off += object_size; 2386 } 2387 2388 out: 2389 ceph_oloc_destroy(&src_oloc); 2390 ceph_oloc_destroy(&dst_oloc); 2391 return bytes; 2392 } 2393 2394 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, 2395 struct file *dst_file, loff_t dst_off, 2396 size_t len, unsigned int flags) 2397 { 2398 struct inode *src_inode = file_inode(src_file); 2399 struct inode *dst_inode = file_inode(dst_file); 2400 struct ceph_inode_info *src_ci = ceph_inode(src_inode); 2401 struct ceph_inode_info *dst_ci = ceph_inode(dst_inode); 2402 struct ceph_cap_flush *prealloc_cf; 2403 struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode); 2404 loff_t size; 2405 ssize_t ret = -EIO, bytes; 2406 u64 src_objnum, dst_objnum, src_objoff, dst_objoff; 2407 u32 src_objlen, dst_objlen; 2408 int src_got = 0, dst_got = 0, err, dirty; 2409 2410 if (src_inode->i_sb != dst_inode->i_sb) { 2411 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode); 2412 2413 if (ceph_fsid_compare(&src_fsc->client->fsid, 2414 &dst_fsc->client->fsid)) { 2415 dout("Copying files across clusters: src: %pU dst: %pU\n", 2416 &src_fsc->client->fsid, &dst_fsc->client->fsid); 2417 return -EXDEV; 2418 } 2419 } 2420 if (ceph_snap(dst_inode) != CEPH_NOSNAP) 2421 return -EROFS; 2422 2423 /* 2424 * Some of the checks below will return -EOPNOTSUPP, which will force a 2425 * fallback to the default VFS copy_file_range implementation. This is 2426 * desirable in several cases (for ex, the 'len' is smaller than the 2427 * size of the objects, or in cases where that would be more 2428 * efficient). 2429 */ 2430 2431 if (ceph_test_mount_opt(src_fsc, NOCOPYFROM)) 2432 return -EOPNOTSUPP; 2433 2434 if (!src_fsc->have_copy_from2) 2435 return -EOPNOTSUPP; 2436 2437 /* 2438 * Striped file layouts require that we copy partial objects, but the 2439 * OSD copy-from operation only supports full-object copies. Limit 2440 * this to non-striped file layouts for now. 2441 */ 2442 if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) || 2443 (src_ci->i_layout.stripe_count != 1) || 2444 (dst_ci->i_layout.stripe_count != 1) || 2445 (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) { 2446 dout("Invalid src/dst files layout\n"); 2447 return -EOPNOTSUPP; 2448 } 2449 2450 if (len < src_ci->i_layout.object_size) 2451 return -EOPNOTSUPP; /* no remote copy will be done */ 2452 2453 prealloc_cf = ceph_alloc_cap_flush(); 2454 if (!prealloc_cf) 2455 return -ENOMEM; 2456 2457 /* Start by sync'ing the source and destination files */ 2458 ret = file_write_and_wait_range(src_file, src_off, (src_off + len)); 2459 if (ret < 0) { 2460 dout("failed to write src file (%zd)\n", ret); 2461 goto out; 2462 } 2463 ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len)); 2464 if (ret < 0) { 2465 dout("failed to write dst file (%zd)\n", ret); 2466 goto out; 2467 } 2468 2469 /* 2470 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other 2471 * clients may have dirty data in their caches. And OSDs know nothing 2472 * about caps, so they can't safely do the remote object copies. 2473 */ 2474 err = get_rd_wr_caps(src_file, &src_got, 2475 dst_file, (dst_off + len), &dst_got); 2476 if (err < 0) { 2477 dout("get_rd_wr_caps returned %d\n", err); 2478 ret = -EOPNOTSUPP; 2479 goto out; 2480 } 2481 2482 ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len); 2483 if (ret < 0) 2484 goto out_caps; 2485 2486 /* Drop dst file cached pages */ 2487 ceph_fscache_invalidate(dst_inode, false); 2488 ret = invalidate_inode_pages2_range(dst_inode->i_mapping, 2489 dst_off >> PAGE_SHIFT, 2490 (dst_off + len) >> PAGE_SHIFT); 2491 if (ret < 0) { 2492 dout("Failed to invalidate inode pages (%zd)\n", ret); 2493 ret = 0; /* XXX */ 2494 } 2495 ceph_calc_file_object_mapping(&src_ci->i_layout, src_off, 2496 src_ci->i_layout.object_size, 2497 &src_objnum, &src_objoff, &src_objlen); 2498 ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off, 2499 dst_ci->i_layout.object_size, 2500 &dst_objnum, &dst_objoff, &dst_objlen); 2501 /* object-level offsets need to the same */ 2502 if (src_objoff != dst_objoff) { 2503 ret = -EOPNOTSUPP; 2504 goto out_caps; 2505 } 2506 2507 /* 2508 * Do a manual copy if the object offset isn't object aligned. 2509 * 'src_objlen' contains the bytes left until the end of the object, 2510 * starting at the src_off 2511 */ 2512 if (src_objoff) { 2513 dout("Initial partial copy of %u bytes\n", src_objlen); 2514 2515 /* 2516 * we need to temporarily drop all caps as we'll be calling 2517 * {read,write}_iter, which will get caps again. 2518 */ 2519 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got); 2520 ret = do_splice_direct(src_file, &src_off, dst_file, 2521 &dst_off, src_objlen, flags); 2522 /* Abort on short copies or on error */ 2523 if (ret < src_objlen) { 2524 dout("Failed partial copy (%zd)\n", ret); 2525 goto out; 2526 } 2527 len -= ret; 2528 err = get_rd_wr_caps(src_file, &src_got, 2529 dst_file, (dst_off + len), &dst_got); 2530 if (err < 0) 2531 goto out; 2532 err = is_file_size_ok(src_inode, dst_inode, 2533 src_off, dst_off, len); 2534 if (err < 0) 2535 goto out_caps; 2536 } 2537 2538 size = i_size_read(dst_inode); 2539 bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off, 2540 src_fsc, len, flags); 2541 if (bytes <= 0) { 2542 if (!ret) 2543 ret = bytes; 2544 goto out_caps; 2545 } 2546 dout("Copied %zu bytes out of %zu\n", bytes, len); 2547 len -= bytes; 2548 ret += bytes; 2549 2550 file_update_time(dst_file); 2551 inode_inc_iversion_raw(dst_inode); 2552 2553 if (dst_off > size) { 2554 /* Let the MDS know about dst file size change */ 2555 if (ceph_inode_set_size(dst_inode, dst_off) || 2556 ceph_quota_is_max_bytes_approaching(dst_inode, dst_off)) 2557 ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL); 2558 } 2559 /* Mark Fw dirty */ 2560 spin_lock(&dst_ci->i_ceph_lock); 2561 dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf); 2562 spin_unlock(&dst_ci->i_ceph_lock); 2563 if (dirty) 2564 __mark_inode_dirty(dst_inode, dirty); 2565 2566 out_caps: 2567 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got); 2568 2569 /* 2570 * Do the final manual copy if we still have some bytes left, unless 2571 * there were errors in remote object copies (len >= object_size). 2572 */ 2573 if (len && (len < src_ci->i_layout.object_size)) { 2574 dout("Final partial copy of %zu bytes\n", len); 2575 bytes = do_splice_direct(src_file, &src_off, dst_file, 2576 &dst_off, len, flags); 2577 if (bytes > 0) 2578 ret += bytes; 2579 else 2580 dout("Failed partial copy (%zd)\n", bytes); 2581 } 2582 2583 out: 2584 ceph_free_cap_flush(prealloc_cf); 2585 2586 return ret; 2587 } 2588 2589 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off, 2590 struct file *dst_file, loff_t dst_off, 2591 size_t len, unsigned int flags) 2592 { 2593 ssize_t ret; 2594 2595 ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off, 2596 len, flags); 2597 2598 if (ret == -EOPNOTSUPP || ret == -EXDEV) 2599 ret = generic_copy_file_range(src_file, src_off, dst_file, 2600 dst_off, len, flags); 2601 return ret; 2602 } 2603 2604 const struct file_operations ceph_file_fops = { 2605 .open = ceph_open, 2606 .release = ceph_release, 2607 .llseek = ceph_llseek, 2608 .read_iter = ceph_read_iter, 2609 .write_iter = ceph_write_iter, 2610 .mmap = ceph_mmap, 2611 .fsync = ceph_fsync, 2612 .lock = ceph_lock, 2613 .setlease = simple_nosetlease, 2614 .flock = ceph_flock, 2615 .splice_read = generic_file_splice_read, 2616 .splice_write = iter_file_splice_write, 2617 .unlocked_ioctl = ceph_ioctl, 2618 .compat_ioctl = compat_ptr_ioctl, 2619 .fallocate = ceph_fallocate, 2620 .copy_file_range = ceph_copy_file_range, 2621 }; 2622