1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/module.h> 4 #include <linux/sched.h> 5 #include <linux/slab.h> 6 #include <linux/file.h> 7 #include <linux/mount.h> 8 #include <linux/namei.h> 9 #include <linux/writeback.h> 10 #include <linux/falloc.h> 11 12 #include "super.h" 13 #include "mds_client.h" 14 #include "cache.h" 15 16 /* 17 * Ceph file operations 18 * 19 * Implement basic open/close functionality, and implement 20 * read/write. 21 * 22 * We implement three modes of file I/O: 23 * - buffered uses the generic_file_aio_{read,write} helpers 24 * 25 * - synchronous is used when there is multi-client read/write 26 * sharing, avoids the page cache, and synchronously waits for an 27 * ack from the OSD. 28 * 29 * - direct io takes the variant of the sync path that references 30 * user pages directly. 31 * 32 * fsync() flushes and waits on dirty pages, but just queues metadata 33 * for writeback: since the MDS can recover size and mtime there is no 34 * need to wait for MDS acknowledgement. 35 */ 36 37 /* 38 * Calculate the length sum of direct io vectors that can 39 * be combined into one page vector. 40 */ 41 static size_t dio_get_pagev_size(const struct iov_iter *it) 42 { 43 const struct iovec *iov = it->iov; 44 const struct iovec *iovend = iov + it->nr_segs; 45 size_t size; 46 47 size = iov->iov_len - it->iov_offset; 48 /* 49 * An iov can be page vectored when both the current tail 50 * and the next base are page aligned. 51 */ 52 while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && 53 (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { 54 size += iov->iov_len; 55 } 56 dout("dio_get_pagevlen len = %zu\n", size); 57 return size; 58 } 59 60 /* 61 * Allocate a page vector based on (@it, @nbytes). 62 * The return value is the tuple describing a page vector, 63 * that is (@pages, @page_align, @num_pages). 64 */ 65 static struct page ** 66 dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, 67 size_t *page_align, int *num_pages) 68 { 69 struct iov_iter tmp_it = *it; 70 size_t align; 71 struct page **pages; 72 int ret = 0, idx, npages; 73 74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) & 75 (PAGE_SIZE - 1); 76 npages = calc_pages_for(align, nbytes); 77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); 78 if (!pages) { 79 pages = vmalloc(sizeof(*pages) * npages); 80 if (!pages) 81 return ERR_PTR(-ENOMEM); 82 } 83 84 for (idx = 0; idx < npages; ) { 85 size_t start; 86 ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, 87 npages - idx, &start); 88 if (ret < 0) 89 goto fail; 90 91 iov_iter_advance(&tmp_it, ret); 92 nbytes -= ret; 93 idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; 94 } 95 96 BUG_ON(nbytes != 0); 97 *num_pages = npages; 98 *page_align = align; 99 dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); 100 return pages; 101 fail: 102 ceph_put_page_vector(pages, idx, false); 103 return ERR_PTR(ret); 104 } 105 106 /* 107 * Prepare an open request. Preallocate ceph_cap to avoid an 108 * inopportune ENOMEM later. 109 */ 110 static struct ceph_mds_request * 111 prepare_open_request(struct super_block *sb, int flags, int create_mode) 112 { 113 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 114 struct ceph_mds_client *mdsc = fsc->mdsc; 115 struct ceph_mds_request *req; 116 int want_auth = USE_ANY_MDS; 117 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 118 119 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) 120 want_auth = USE_AUTH_MDS; 121 122 req = ceph_mdsc_create_request(mdsc, op, want_auth); 123 if (IS_ERR(req)) 124 goto out; 125 req->r_fmode = ceph_flags_to_mode(flags); 126 req->r_args.open.flags = cpu_to_le32(flags); 127 req->r_args.open.mode = cpu_to_le32(create_mode); 128 out: 129 return req; 130 } 131 132 /* 133 * initialize private struct file data. 134 * if we fail, clean up by dropping fmode reference on the ceph_inode 135 */ 136 static int ceph_init_file(struct inode *inode, struct file *file, int fmode) 137 { 138 struct ceph_file_info *cf; 139 int ret = 0; 140 struct ceph_inode_info *ci = ceph_inode(inode); 141 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 142 struct ceph_mds_client *mdsc = fsc->mdsc; 143 144 switch (inode->i_mode & S_IFMT) { 145 case S_IFREG: 146 /* First file open request creates the cookie, we want to keep 147 * this cookie around for the filetime of the inode as not to 148 * have to worry about fscache register / revoke / operation 149 * races. 150 * 151 * Also, if we know the operation is going to invalidate data 152 * (non readonly) just nuke the cache right away. 153 */ 154 ceph_fscache_register_inode_cookie(mdsc->fsc, ci); 155 if ((fmode & CEPH_FILE_MODE_WR)) 156 ceph_fscache_invalidate(inode); 157 case S_IFDIR: 158 dout("init_file %p %p 0%o (regular)\n", inode, file, 159 inode->i_mode); 160 cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); 161 if (cf == NULL) { 162 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 163 return -ENOMEM; 164 } 165 cf->fmode = fmode; 166 cf->next_offset = 2; 167 cf->readdir_cache_idx = -1; 168 file->private_data = cf; 169 BUG_ON(inode->i_fop->release != ceph_release); 170 break; 171 172 case S_IFLNK: 173 dout("init_file %p %p 0%o (symlink)\n", inode, file, 174 inode->i_mode); 175 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 176 break; 177 178 default: 179 dout("init_file %p %p 0%o (special)\n", inode, file, 180 inode->i_mode); 181 /* 182 * we need to drop the open ref now, since we don't 183 * have .release set to ceph_release. 184 */ 185 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 186 BUG_ON(inode->i_fop->release == ceph_release); 187 188 /* call the proper open fop */ 189 ret = inode->i_fop->open(inode, file); 190 } 191 return ret; 192 } 193 194 /* 195 * If we already have the requisite capabilities, we can satisfy 196 * the open request locally (no need to request new caps from the 197 * MDS). We do, however, need to inform the MDS (asynchronously) 198 * if our wanted caps set expands. 199 */ 200 int ceph_open(struct inode *inode, struct file *file) 201 { 202 struct ceph_inode_info *ci = ceph_inode(inode); 203 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 204 struct ceph_mds_client *mdsc = fsc->mdsc; 205 struct ceph_mds_request *req; 206 struct ceph_file_info *cf = file->private_data; 207 int err; 208 int flags, fmode, wanted; 209 210 if (cf) { 211 dout("open file %p is already opened\n", file); 212 return 0; 213 } 214 215 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ 216 flags = file->f_flags & ~(O_CREAT|O_EXCL); 217 if (S_ISDIR(inode->i_mode)) 218 flags = O_DIRECTORY; /* mds likes to know */ 219 220 dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, 221 ceph_vinop(inode), file, flags, file->f_flags); 222 fmode = ceph_flags_to_mode(flags); 223 wanted = ceph_caps_for_mode(fmode); 224 225 /* snapped files are read-only */ 226 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) 227 return -EROFS; 228 229 /* trivially open snapdir */ 230 if (ceph_snap(inode) == CEPH_SNAPDIR) { 231 spin_lock(&ci->i_ceph_lock); 232 __ceph_get_fmode(ci, fmode); 233 spin_unlock(&ci->i_ceph_lock); 234 return ceph_init_file(inode, file, fmode); 235 } 236 237 /* 238 * No need to block if we have caps on the auth MDS (for 239 * write) or any MDS (for read). Update wanted set 240 * asynchronously. 241 */ 242 spin_lock(&ci->i_ceph_lock); 243 if (__ceph_is_any_real_caps(ci) && 244 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { 245 int mds_wanted = __ceph_caps_mds_wanted(ci); 246 int issued = __ceph_caps_issued(ci, NULL); 247 248 dout("open %p fmode %d want %s issued %s using existing\n", 249 inode, fmode, ceph_cap_string(wanted), 250 ceph_cap_string(issued)); 251 __ceph_get_fmode(ci, fmode); 252 spin_unlock(&ci->i_ceph_lock); 253 254 /* adjust wanted? */ 255 if ((issued & wanted) != wanted && 256 (mds_wanted & wanted) != wanted && 257 ceph_snap(inode) != CEPH_SNAPDIR) 258 ceph_check_caps(ci, 0, NULL); 259 260 return ceph_init_file(inode, file, fmode); 261 } else if (ceph_snap(inode) != CEPH_NOSNAP && 262 (ci->i_snap_caps & wanted) == wanted) { 263 __ceph_get_fmode(ci, fmode); 264 spin_unlock(&ci->i_ceph_lock); 265 return ceph_init_file(inode, file, fmode); 266 } 267 268 spin_unlock(&ci->i_ceph_lock); 269 270 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 271 req = prepare_open_request(inode->i_sb, flags, 0); 272 if (IS_ERR(req)) { 273 err = PTR_ERR(req); 274 goto out; 275 } 276 req->r_inode = inode; 277 ihold(inode); 278 279 req->r_num_caps = 1; 280 err = ceph_mdsc_do_request(mdsc, NULL, req); 281 if (!err) 282 err = ceph_init_file(inode, file, req->r_fmode); 283 ceph_mdsc_put_request(req); 284 dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 285 out: 286 return err; 287 } 288 289 290 /* 291 * Do a lookup + open with a single request. If we get a non-existent 292 * file or symlink, return 1 so the VFS can retry. 293 */ 294 int ceph_atomic_open(struct inode *dir, struct dentry *dentry, 295 struct file *file, unsigned flags, umode_t mode, 296 int *opened) 297 { 298 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 299 struct ceph_mds_client *mdsc = fsc->mdsc; 300 struct ceph_mds_request *req; 301 struct dentry *dn; 302 struct ceph_acls_info acls = {}; 303 int mask; 304 int err; 305 306 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", 307 dir, dentry, dentry, 308 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); 309 310 if (dentry->d_name.len > NAME_MAX) 311 return -ENAMETOOLONG; 312 313 err = ceph_init_dentry(dentry); 314 if (err < 0) 315 return err; 316 317 if (flags & O_CREAT) { 318 err = ceph_pre_init_acls(dir, &mode, &acls); 319 if (err < 0) 320 return err; 321 } 322 323 /* do the open */ 324 req = prepare_open_request(dir->i_sb, flags, mode); 325 if (IS_ERR(req)) { 326 err = PTR_ERR(req); 327 goto out_acl; 328 } 329 req->r_dentry = dget(dentry); 330 req->r_num_caps = 2; 331 if (flags & O_CREAT) { 332 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 333 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 334 if (acls.pagelist) { 335 req->r_pagelist = acls.pagelist; 336 acls.pagelist = NULL; 337 } 338 } 339 340 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 341 if (ceph_security_xattr_wanted(dir)) 342 mask |= CEPH_CAP_XATTR_SHARED; 343 req->r_args.open.mask = cpu_to_le32(mask); 344 345 req->r_locked_dir = dir; /* caller holds dir->i_mutex */ 346 err = ceph_mdsc_do_request(mdsc, 347 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, 348 req); 349 err = ceph_handle_snapdir(req, dentry, err); 350 if (err) 351 goto out_req; 352 353 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) 354 err = ceph_handle_notrace_create(dir, dentry); 355 356 if (d_unhashed(dentry)) { 357 dn = ceph_finish_lookup(req, dentry, err); 358 if (IS_ERR(dn)) 359 err = PTR_ERR(dn); 360 } else { 361 /* we were given a hashed negative dentry */ 362 dn = NULL; 363 } 364 if (err) 365 goto out_req; 366 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { 367 /* make vfs retry on splice, ENOENT, or symlink */ 368 dout("atomic_open finish_no_open on dn %p\n", dn); 369 err = finish_no_open(file, dn); 370 } else { 371 dout("atomic_open finish_open on dn %p\n", dn); 372 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 373 ceph_init_inode_acls(d_inode(dentry), &acls); 374 *opened |= FILE_CREATED; 375 } 376 err = finish_open(file, dentry, ceph_open, opened); 377 } 378 out_req: 379 if (!req->r_err && req->r_target_inode) 380 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); 381 ceph_mdsc_put_request(req); 382 out_acl: 383 ceph_release_acls_info(&acls); 384 dout("atomic_open result=%d\n", err); 385 return err; 386 } 387 388 int ceph_release(struct inode *inode, struct file *file) 389 { 390 struct ceph_inode_info *ci = ceph_inode(inode); 391 struct ceph_file_info *cf = file->private_data; 392 393 dout("release inode %p file %p\n", inode, file); 394 ceph_put_fmode(ci, cf->fmode); 395 if (cf->last_readdir) 396 ceph_mdsc_put_request(cf->last_readdir); 397 kfree(cf->last_name); 398 kfree(cf->dir_info); 399 kmem_cache_free(ceph_file_cachep, cf); 400 401 /* wake up anyone waiting for caps on this inode */ 402 wake_up_all(&ci->i_cap_wq); 403 return 0; 404 } 405 406 enum { 407 HAVE_RETRIED = 1, 408 CHECK_EOF = 2, 409 READ_INLINE = 3, 410 }; 411 412 /* 413 * Read a range of bytes striped over one or more objects. Iterate over 414 * objects we stripe over. (That's not atomic, but good enough for now.) 415 * 416 * If we get a short result from the OSD, check against i_size; we need to 417 * only return a short read to the caller if we hit EOF. 418 */ 419 static int striped_read(struct inode *inode, 420 u64 off, u64 len, 421 struct page **pages, int num_pages, 422 int *checkeof) 423 { 424 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 425 struct ceph_inode_info *ci = ceph_inode(inode); 426 u64 pos, this_len, left; 427 loff_t i_size; 428 int page_align, pages_left; 429 int read, ret; 430 struct page **page_pos; 431 bool hit_stripe, was_short; 432 433 /* 434 * we may need to do multiple reads. not atomic, unfortunately. 435 */ 436 pos = off; 437 left = len; 438 page_pos = pages; 439 pages_left = num_pages; 440 read = 0; 441 442 more: 443 page_align = pos & ~PAGE_MASK; 444 this_len = left; 445 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), 446 &ci->i_layout, pos, &this_len, 447 ci->i_truncate_seq, 448 ci->i_truncate_size, 449 page_pos, pages_left, page_align); 450 if (ret == -ENOENT) 451 ret = 0; 452 hit_stripe = this_len < left; 453 was_short = ret >= 0 && ret < this_len; 454 dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read, 455 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); 456 457 i_size = i_size_read(inode); 458 if (ret >= 0) { 459 int didpages; 460 if (was_short && (pos + ret < i_size)) { 461 int zlen = min(this_len - ret, i_size - pos - ret); 462 int zoff = (off & ~PAGE_MASK) + read + ret; 463 dout(" zero gap %llu to %llu\n", 464 pos + ret, pos + ret + zlen); 465 ceph_zero_page_vector_range(zoff, zlen, pages); 466 ret += zlen; 467 } 468 469 didpages = (page_align + ret) >> PAGE_SHIFT; 470 pos += ret; 471 read = pos - off; 472 left -= ret; 473 page_pos += didpages; 474 pages_left -= didpages; 475 476 /* hit stripe and need continue*/ 477 if (left && hit_stripe && pos < i_size) 478 goto more; 479 } 480 481 if (read > 0) { 482 ret = read; 483 /* did we bounce off eof? */ 484 if (pos + left > i_size) 485 *checkeof = CHECK_EOF; 486 } 487 488 dout("striped_read returns %d\n", ret); 489 return ret; 490 } 491 492 /* 493 * Completely synchronous read and write methods. Direct from __user 494 * buffer to osd, or directly to user pages (if O_DIRECT). 495 * 496 * If the read spans object boundary, just do multiple reads. 497 */ 498 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i, 499 int *checkeof) 500 { 501 struct file *file = iocb->ki_filp; 502 struct inode *inode = file_inode(file); 503 struct page **pages; 504 u64 off = iocb->ki_pos; 505 int num_pages, ret; 506 size_t len = iov_iter_count(i); 507 508 dout("sync_read on file %p %llu~%u %s\n", file, off, 509 (unsigned)len, 510 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 511 512 if (!len) 513 return 0; 514 /* 515 * flush any page cache pages in this range. this 516 * will make concurrent normal and sync io slow, 517 * but it will at least behave sensibly when they are 518 * in sequence. 519 */ 520 ret = filemap_write_and_wait_range(inode->i_mapping, off, 521 off + len); 522 if (ret < 0) 523 return ret; 524 525 num_pages = calc_pages_for(off, len); 526 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 527 if (IS_ERR(pages)) 528 return PTR_ERR(pages); 529 ret = striped_read(inode, off, len, pages, 530 num_pages, checkeof); 531 if (ret > 0) { 532 int l, k = 0; 533 size_t left = ret; 534 535 while (left) { 536 size_t page_off = off & ~PAGE_MASK; 537 size_t copy = min_t(size_t, left, 538 PAGE_SIZE - page_off); 539 l = copy_page_to_iter(pages[k++], page_off, copy, i); 540 off += l; 541 left -= l; 542 if (l < copy) 543 break; 544 } 545 } 546 ceph_release_page_vector(pages, num_pages); 547 548 if (off > iocb->ki_pos) { 549 ret = off - iocb->ki_pos; 550 iocb->ki_pos = off; 551 } 552 553 dout("sync_read result %d\n", ret); 554 return ret; 555 } 556 557 struct ceph_aio_request { 558 struct kiocb *iocb; 559 size_t total_len; 560 int write; 561 int error; 562 struct list_head osd_reqs; 563 unsigned num_reqs; 564 atomic_t pending_reqs; 565 struct timespec mtime; 566 struct ceph_cap_flush *prealloc_cf; 567 }; 568 569 struct ceph_aio_work { 570 struct work_struct work; 571 struct ceph_osd_request *req; 572 }; 573 574 static void ceph_aio_retry_work(struct work_struct *work); 575 576 static void ceph_aio_complete(struct inode *inode, 577 struct ceph_aio_request *aio_req) 578 { 579 struct ceph_inode_info *ci = ceph_inode(inode); 580 int ret; 581 582 if (!atomic_dec_and_test(&aio_req->pending_reqs)) 583 return; 584 585 ret = aio_req->error; 586 if (!ret) 587 ret = aio_req->total_len; 588 589 dout("ceph_aio_complete %p rc %d\n", inode, ret); 590 591 if (ret >= 0 && aio_req->write) { 592 int dirty; 593 594 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len; 595 if (endoff > i_size_read(inode)) { 596 if (ceph_inode_set_size(inode, endoff)) 597 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 598 } 599 600 spin_lock(&ci->i_ceph_lock); 601 ci->i_inline_version = CEPH_INLINE_NONE; 602 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 603 &aio_req->prealloc_cf); 604 spin_unlock(&ci->i_ceph_lock); 605 if (dirty) 606 __mark_inode_dirty(inode, dirty); 607 608 } 609 610 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : 611 CEPH_CAP_FILE_RD)); 612 613 aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); 614 615 ceph_free_cap_flush(aio_req->prealloc_cf); 616 kfree(aio_req); 617 } 618 619 static void ceph_aio_complete_req(struct ceph_osd_request *req, 620 struct ceph_msg *msg) 621 { 622 int rc = req->r_result; 623 struct inode *inode = req->r_inode; 624 struct ceph_aio_request *aio_req = req->r_priv; 625 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 626 int num_pages = calc_pages_for((u64)osd_data->alignment, 627 osd_data->length); 628 629 dout("ceph_aio_complete_req %p rc %d bytes %llu\n", 630 inode, rc, osd_data->length); 631 632 if (rc == -EOLDSNAPC) { 633 struct ceph_aio_work *aio_work; 634 BUG_ON(!aio_req->write); 635 636 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); 637 if (aio_work) { 638 INIT_WORK(&aio_work->work, ceph_aio_retry_work); 639 aio_work->req = req; 640 queue_work(ceph_inode_to_client(inode)->wb_wq, 641 &aio_work->work); 642 return; 643 } 644 rc = -ENOMEM; 645 } else if (!aio_req->write) { 646 if (rc == -ENOENT) 647 rc = 0; 648 if (rc >= 0 && osd_data->length > rc) { 649 int zoff = osd_data->alignment + rc; 650 int zlen = osd_data->length - rc; 651 /* 652 * If read is satisfied by single OSD request, 653 * it can pass EOF. Otherwise read is within 654 * i_size. 655 */ 656 if (aio_req->num_reqs == 1) { 657 loff_t i_size = i_size_read(inode); 658 loff_t endoff = aio_req->iocb->ki_pos + rc; 659 if (endoff < i_size) 660 zlen = min_t(size_t, zlen, 661 i_size - endoff); 662 aio_req->total_len = rc + zlen; 663 } 664 665 if (zlen > 0) 666 ceph_zero_page_vector_range(zoff, zlen, 667 osd_data->pages); 668 } 669 } 670 671 ceph_put_page_vector(osd_data->pages, num_pages, false); 672 ceph_osdc_put_request(req); 673 674 if (rc < 0) 675 cmpxchg(&aio_req->error, 0, rc); 676 677 ceph_aio_complete(inode, aio_req); 678 return; 679 } 680 681 static void ceph_aio_retry_work(struct work_struct *work) 682 { 683 struct ceph_aio_work *aio_work = 684 container_of(work, struct ceph_aio_work, work); 685 struct ceph_osd_request *orig_req = aio_work->req; 686 struct ceph_aio_request *aio_req = orig_req->r_priv; 687 struct inode *inode = orig_req->r_inode; 688 struct ceph_inode_info *ci = ceph_inode(inode); 689 struct ceph_snap_context *snapc; 690 struct ceph_osd_request *req; 691 int ret; 692 693 spin_lock(&ci->i_ceph_lock); 694 if (__ceph_have_pending_cap_snap(ci)) { 695 struct ceph_cap_snap *capsnap = 696 list_last_entry(&ci->i_cap_snaps, 697 struct ceph_cap_snap, 698 ci_item); 699 snapc = ceph_get_snap_context(capsnap->context); 700 } else { 701 BUG_ON(!ci->i_head_snapc); 702 snapc = ceph_get_snap_context(ci->i_head_snapc); 703 } 704 spin_unlock(&ci->i_ceph_lock); 705 706 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, 707 false, GFP_NOFS); 708 if (!req) { 709 ret = -ENOMEM; 710 req = orig_req; 711 goto out; 712 } 713 714 req->r_flags = CEPH_OSD_FLAG_ORDERSNAP | 715 CEPH_OSD_FLAG_ONDISK | 716 CEPH_OSD_FLAG_WRITE; 717 req->r_base_oloc = orig_req->r_base_oloc; 718 req->r_base_oid = orig_req->r_base_oid; 719 720 req->r_ops[0] = orig_req->r_ops[0]; 721 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 722 723 ceph_osdc_build_request(req, req->r_ops[0].extent.offset, 724 snapc, CEPH_NOSNAP, &aio_req->mtime); 725 726 ceph_osdc_put_request(orig_req); 727 728 req->r_callback = ceph_aio_complete_req; 729 req->r_inode = inode; 730 req->r_priv = aio_req; 731 732 ret = ceph_osdc_start_request(req->r_osdc, req, false); 733 out: 734 if (ret < 0) { 735 req->r_result = ret; 736 ceph_aio_complete_req(req, NULL); 737 } 738 739 ceph_put_snap_context(snapc); 740 kfree(aio_work); 741 } 742 743 /* 744 * Write commit request unsafe callback, called to tell us when a 745 * request is unsafe (that is, in flight--has been handed to the 746 * messenger to send to its target osd). It is called again when 747 * we've received a response message indicating the request is 748 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request 749 * is completed early (and unsuccessfully) due to a timeout or 750 * interrupt. 751 * 752 * This is used if we requested both an ACK and ONDISK commit reply 753 * from the OSD. 754 */ 755 static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) 756 { 757 struct ceph_inode_info *ci = ceph_inode(req->r_inode); 758 759 dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid, 760 unsafe ? "un" : ""); 761 if (unsafe) { 762 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); 763 spin_lock(&ci->i_unsafe_lock); 764 list_add_tail(&req->r_unsafe_item, 765 &ci->i_unsafe_writes); 766 spin_unlock(&ci->i_unsafe_lock); 767 } else { 768 spin_lock(&ci->i_unsafe_lock); 769 list_del_init(&req->r_unsafe_item); 770 spin_unlock(&ci->i_unsafe_lock); 771 ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); 772 } 773 } 774 775 776 static ssize_t 777 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, 778 struct ceph_snap_context *snapc, 779 struct ceph_cap_flush **pcf) 780 { 781 struct file *file = iocb->ki_filp; 782 struct inode *inode = file_inode(file); 783 struct ceph_inode_info *ci = ceph_inode(inode); 784 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 785 struct ceph_vino vino; 786 struct ceph_osd_request *req; 787 struct page **pages; 788 struct ceph_aio_request *aio_req = NULL; 789 int num_pages = 0; 790 int flags; 791 int ret; 792 struct timespec mtime = current_fs_time(inode->i_sb); 793 size_t count = iov_iter_count(iter); 794 loff_t pos = iocb->ki_pos; 795 bool write = iov_iter_rw(iter) == WRITE; 796 797 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 798 return -EROFS; 799 800 dout("sync_direct_read_write (%s) on file %p %lld~%u\n", 801 (write ? "write" : "read"), file, pos, (unsigned)count); 802 803 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 804 if (ret < 0) 805 return ret; 806 807 if (write) { 808 ret = invalidate_inode_pages2_range(inode->i_mapping, 809 pos >> PAGE_SHIFT, 810 (pos + count) >> PAGE_SHIFT); 811 if (ret < 0) 812 dout("invalidate_inode_pages2_range returned %d\n", ret); 813 814 flags = CEPH_OSD_FLAG_ORDERSNAP | 815 CEPH_OSD_FLAG_ONDISK | 816 CEPH_OSD_FLAG_WRITE; 817 } else { 818 flags = CEPH_OSD_FLAG_READ; 819 } 820 821 while (iov_iter_count(iter) > 0) { 822 u64 size = dio_get_pagev_size(iter); 823 size_t start = 0; 824 ssize_t len; 825 826 vino = ceph_vino(inode); 827 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 828 vino, pos, &size, 0, 829 /*include a 'startsync' command*/ 830 write ? 2 : 1, 831 write ? CEPH_OSD_OP_WRITE : 832 CEPH_OSD_OP_READ, 833 flags, snapc, 834 ci->i_truncate_seq, 835 ci->i_truncate_size, 836 false); 837 if (IS_ERR(req)) { 838 ret = PTR_ERR(req); 839 break; 840 } 841 842 len = size; 843 pages = dio_get_pages_alloc(iter, len, &start, &num_pages); 844 if (IS_ERR(pages)) { 845 ceph_osdc_put_request(req); 846 ret = PTR_ERR(pages); 847 break; 848 } 849 850 /* 851 * To simplify error handling, allow AIO when IO within i_size 852 * or IO can be satisfied by single OSD request. 853 */ 854 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && 855 (len == count || pos + count <= i_size_read(inode))) { 856 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); 857 if (aio_req) { 858 aio_req->iocb = iocb; 859 aio_req->write = write; 860 INIT_LIST_HEAD(&aio_req->osd_reqs); 861 if (write) { 862 aio_req->mtime = mtime; 863 swap(aio_req->prealloc_cf, *pcf); 864 } 865 } 866 /* ignore error */ 867 } 868 869 if (write) { 870 /* 871 * throw out any page cache pages in this range. this 872 * may block. 873 */ 874 truncate_inode_pages_range(inode->i_mapping, pos, 875 (pos+len) | (PAGE_SIZE - 1)); 876 877 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 878 } 879 880 881 osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, 882 false, false); 883 884 ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); 885 886 if (aio_req) { 887 aio_req->total_len += len; 888 aio_req->num_reqs++; 889 atomic_inc(&aio_req->pending_reqs); 890 891 req->r_callback = ceph_aio_complete_req; 892 req->r_inode = inode; 893 req->r_priv = aio_req; 894 list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); 895 896 pos += len; 897 iov_iter_advance(iter, len); 898 continue; 899 } 900 901 ret = ceph_osdc_start_request(req->r_osdc, req, false); 902 if (!ret) 903 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 904 905 size = i_size_read(inode); 906 if (!write) { 907 if (ret == -ENOENT) 908 ret = 0; 909 if (ret >= 0 && ret < len && pos + ret < size) { 910 int zlen = min_t(size_t, len - ret, 911 size - pos - ret); 912 ceph_zero_page_vector_range(start + ret, zlen, 913 pages); 914 ret += zlen; 915 } 916 if (ret >= 0) 917 len = ret; 918 } 919 920 ceph_put_page_vector(pages, num_pages, false); 921 922 ceph_osdc_put_request(req); 923 if (ret < 0) 924 break; 925 926 pos += len; 927 iov_iter_advance(iter, len); 928 929 if (!write && pos >= size) 930 break; 931 932 if (write && pos > size) { 933 if (ceph_inode_set_size(inode, pos)) 934 ceph_check_caps(ceph_inode(inode), 935 CHECK_CAPS_AUTHONLY, 936 NULL); 937 } 938 } 939 940 if (aio_req) { 941 if (aio_req->num_reqs == 0) { 942 kfree(aio_req); 943 return ret; 944 } 945 946 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : 947 CEPH_CAP_FILE_RD); 948 949 while (!list_empty(&aio_req->osd_reqs)) { 950 req = list_first_entry(&aio_req->osd_reqs, 951 struct ceph_osd_request, 952 r_unsafe_item); 953 list_del_init(&req->r_unsafe_item); 954 if (ret >= 0) 955 ret = ceph_osdc_start_request(req->r_osdc, 956 req, false); 957 if (ret < 0) { 958 req->r_result = ret; 959 ceph_aio_complete_req(req, NULL); 960 } 961 } 962 return -EIOCBQUEUED; 963 } 964 965 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { 966 ret = pos - iocb->ki_pos; 967 iocb->ki_pos = pos; 968 } 969 return ret; 970 } 971 972 /* 973 * Synchronous write, straight from __user pointer or user pages. 974 * 975 * If write spans object boundary, just do multiple writes. (For a 976 * correct atomic write, we should e.g. take write locks on all 977 * objects, rollback on failure, etc.) 978 */ 979 static ssize_t 980 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, 981 struct ceph_snap_context *snapc) 982 { 983 struct file *file = iocb->ki_filp; 984 struct inode *inode = file_inode(file); 985 struct ceph_inode_info *ci = ceph_inode(inode); 986 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 987 struct ceph_vino vino; 988 struct ceph_osd_request *req; 989 struct page **pages; 990 u64 len; 991 int num_pages; 992 int written = 0; 993 int flags; 994 int check_caps = 0; 995 int ret; 996 struct timespec mtime = current_fs_time(inode->i_sb); 997 size_t count = iov_iter_count(from); 998 999 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1000 return -EROFS; 1001 1002 dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count); 1003 1004 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 1005 if (ret < 0) 1006 return ret; 1007 1008 ret = invalidate_inode_pages2_range(inode->i_mapping, 1009 pos >> PAGE_SHIFT, 1010 (pos + count) >> PAGE_SHIFT); 1011 if (ret < 0) 1012 dout("invalidate_inode_pages2_range returned %d\n", ret); 1013 1014 flags = CEPH_OSD_FLAG_ORDERSNAP | 1015 CEPH_OSD_FLAG_ONDISK | 1016 CEPH_OSD_FLAG_WRITE | 1017 CEPH_OSD_FLAG_ACK; 1018 1019 while ((len = iov_iter_count(from)) > 0) { 1020 size_t left; 1021 int n; 1022 1023 vino = ceph_vino(inode); 1024 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1025 vino, pos, &len, 0, 1, 1026 CEPH_OSD_OP_WRITE, flags, snapc, 1027 ci->i_truncate_seq, 1028 ci->i_truncate_size, 1029 false); 1030 if (IS_ERR(req)) { 1031 ret = PTR_ERR(req); 1032 break; 1033 } 1034 1035 /* 1036 * write from beginning of first page, 1037 * regardless of io alignment 1038 */ 1039 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1040 1041 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1042 if (IS_ERR(pages)) { 1043 ret = PTR_ERR(pages); 1044 goto out; 1045 } 1046 1047 left = len; 1048 for (n = 0; n < num_pages; n++) { 1049 size_t plen = min_t(size_t, left, PAGE_SIZE); 1050 ret = copy_page_from_iter(pages[n], 0, plen, from); 1051 if (ret != plen) { 1052 ret = -EFAULT; 1053 break; 1054 } 1055 left -= ret; 1056 } 1057 1058 if (ret < 0) { 1059 ceph_release_page_vector(pages, num_pages); 1060 goto out; 1061 } 1062 1063 /* get a second commit callback */ 1064 req->r_unsafe_callback = ceph_sync_write_unsafe; 1065 req->r_inode = inode; 1066 1067 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, 1068 false, true); 1069 1070 /* BUG_ON(vino.snap != CEPH_NOSNAP); */ 1071 ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); 1072 1073 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1074 if (!ret) 1075 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1076 1077 out: 1078 ceph_osdc_put_request(req); 1079 if (ret == 0) { 1080 pos += len; 1081 written += len; 1082 1083 if (pos > i_size_read(inode)) { 1084 check_caps = ceph_inode_set_size(inode, pos); 1085 if (check_caps) 1086 ceph_check_caps(ceph_inode(inode), 1087 CHECK_CAPS_AUTHONLY, 1088 NULL); 1089 } 1090 } else 1091 break; 1092 } 1093 1094 if (ret != -EOLDSNAPC && written > 0) { 1095 ret = written; 1096 iocb->ki_pos = pos; 1097 } 1098 return ret; 1099 } 1100 1101 /* 1102 * Wrap generic_file_aio_read with checks for cap bits on the inode. 1103 * Atomically grab references, so that those bits are not released 1104 * back to the MDS mid-read. 1105 * 1106 * Hmm, the sync read case isn't actually async... should it be? 1107 */ 1108 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) 1109 { 1110 struct file *filp = iocb->ki_filp; 1111 struct ceph_file_info *fi = filp->private_data; 1112 size_t len = iov_iter_count(to); 1113 struct inode *inode = file_inode(filp); 1114 struct ceph_inode_info *ci = ceph_inode(inode); 1115 struct page *pinned_page = NULL; 1116 ssize_t ret; 1117 int want, got = 0; 1118 int retry_op = 0, read = 0; 1119 1120 again: 1121 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 1122 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); 1123 1124 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1125 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1126 else 1127 want = CEPH_CAP_FILE_CACHE; 1128 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); 1129 if (ret < 0) 1130 return ret; 1131 1132 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || 1133 (iocb->ki_flags & IOCB_DIRECT) || 1134 (fi->flags & CEPH_F_SYNC)) { 1135 1136 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1137 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1138 ceph_cap_string(got)); 1139 1140 if (ci->i_inline_version == CEPH_INLINE_NONE) { 1141 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { 1142 ret = ceph_direct_read_write(iocb, to, 1143 NULL, NULL); 1144 if (ret >= 0 && ret < len) 1145 retry_op = CHECK_EOF; 1146 } else { 1147 ret = ceph_sync_read(iocb, to, &retry_op); 1148 } 1149 } else { 1150 retry_op = READ_INLINE; 1151 } 1152 } else { 1153 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1154 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1155 ceph_cap_string(got)); 1156 1157 ret = generic_file_read_iter(iocb, to); 1158 } 1159 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1160 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1161 if (pinned_page) { 1162 put_page(pinned_page); 1163 pinned_page = NULL; 1164 } 1165 ceph_put_cap_refs(ci, got); 1166 if (retry_op > HAVE_RETRIED && ret >= 0) { 1167 int statret; 1168 struct page *page = NULL; 1169 loff_t i_size; 1170 if (retry_op == READ_INLINE) { 1171 page = __page_cache_alloc(GFP_KERNEL); 1172 if (!page) 1173 return -ENOMEM; 1174 } 1175 1176 statret = __ceph_do_getattr(inode, page, 1177 CEPH_STAT_CAP_INLINE_DATA, !!page); 1178 if (statret < 0) { 1179 __free_page(page); 1180 if (statret == -ENODATA) { 1181 BUG_ON(retry_op != READ_INLINE); 1182 goto again; 1183 } 1184 return statret; 1185 } 1186 1187 i_size = i_size_read(inode); 1188 if (retry_op == READ_INLINE) { 1189 BUG_ON(ret > 0 || read > 0); 1190 if (iocb->ki_pos < i_size && 1191 iocb->ki_pos < PAGE_SIZE) { 1192 loff_t end = min_t(loff_t, i_size, 1193 iocb->ki_pos + len); 1194 end = min_t(loff_t, end, PAGE_SIZE); 1195 if (statret < end) 1196 zero_user_segment(page, statret, end); 1197 ret = copy_page_to_iter(page, 1198 iocb->ki_pos & ~PAGE_MASK, 1199 end - iocb->ki_pos, to); 1200 iocb->ki_pos += ret; 1201 read += ret; 1202 } 1203 if (iocb->ki_pos < i_size && read < len) { 1204 size_t zlen = min_t(size_t, len - read, 1205 i_size - iocb->ki_pos); 1206 ret = iov_iter_zero(zlen, to); 1207 iocb->ki_pos += ret; 1208 read += ret; 1209 } 1210 __free_pages(page, 0); 1211 return read; 1212 } 1213 1214 /* hit EOF or hole? */ 1215 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && 1216 ret < len) { 1217 dout("sync_read hit hole, ppos %lld < size %lld" 1218 ", reading more\n", iocb->ki_pos, i_size); 1219 1220 read += ret; 1221 len -= ret; 1222 retry_op = HAVE_RETRIED; 1223 goto again; 1224 } 1225 } 1226 1227 if (ret >= 0) 1228 ret += read; 1229 1230 return ret; 1231 } 1232 1233 /* 1234 * Take cap references to avoid releasing caps to MDS mid-write. 1235 * 1236 * If we are synchronous, and write with an old snap context, the OSD 1237 * may return EOLDSNAPC. In that case, retry the write.. _after_ 1238 * dropping our cap refs and allowing the pending snap to logically 1239 * complete _before_ this write occurs. 1240 * 1241 * If we are near ENOSPC, write synchronously. 1242 */ 1243 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) 1244 { 1245 struct file *file = iocb->ki_filp; 1246 struct ceph_file_info *fi = file->private_data; 1247 struct inode *inode = file_inode(file); 1248 struct ceph_inode_info *ci = ceph_inode(inode); 1249 struct ceph_osd_client *osdc = 1250 &ceph_sb_to_client(inode->i_sb)->client->osdc; 1251 struct ceph_cap_flush *prealloc_cf; 1252 ssize_t count, written = 0; 1253 int err, want, got; 1254 loff_t pos; 1255 1256 if (ceph_snap(inode) != CEPH_NOSNAP) 1257 return -EROFS; 1258 1259 prealloc_cf = ceph_alloc_cap_flush(); 1260 if (!prealloc_cf) 1261 return -ENOMEM; 1262 1263 inode_lock(inode); 1264 1265 /* We can write back this queue in page reclaim */ 1266 current->backing_dev_info = inode_to_bdi(inode); 1267 1268 if (iocb->ki_flags & IOCB_APPEND) { 1269 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1270 if (err < 0) 1271 goto out; 1272 } 1273 1274 err = generic_write_checks(iocb, from); 1275 if (err <= 0) 1276 goto out; 1277 1278 pos = iocb->ki_pos; 1279 count = iov_iter_count(from); 1280 err = file_remove_privs(file); 1281 if (err) 1282 goto out; 1283 1284 err = file_update_time(file); 1285 if (err) 1286 goto out; 1287 1288 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1289 err = ceph_uninline_data(file, NULL); 1290 if (err < 0) 1291 goto out; 1292 } 1293 1294 retry_snap: 1295 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { 1296 err = -ENOSPC; 1297 goto out; 1298 } 1299 1300 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 1301 inode, ceph_vinop(inode), pos, count, i_size_read(inode)); 1302 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1303 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1304 else 1305 want = CEPH_CAP_FILE_BUFFER; 1306 got = 0; 1307 err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count, 1308 &got, NULL); 1309 if (err < 0) 1310 goto out; 1311 1312 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 1313 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 1314 1315 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 1316 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { 1317 struct ceph_snap_context *snapc; 1318 struct iov_iter data; 1319 inode_unlock(inode); 1320 1321 spin_lock(&ci->i_ceph_lock); 1322 if (__ceph_have_pending_cap_snap(ci)) { 1323 struct ceph_cap_snap *capsnap = 1324 list_last_entry(&ci->i_cap_snaps, 1325 struct ceph_cap_snap, 1326 ci_item); 1327 snapc = ceph_get_snap_context(capsnap->context); 1328 } else { 1329 BUG_ON(!ci->i_head_snapc); 1330 snapc = ceph_get_snap_context(ci->i_head_snapc); 1331 } 1332 spin_unlock(&ci->i_ceph_lock); 1333 1334 /* we might need to revert back to that point */ 1335 data = *from; 1336 if (iocb->ki_flags & IOCB_DIRECT) 1337 written = ceph_direct_read_write(iocb, &data, snapc, 1338 &prealloc_cf); 1339 else 1340 written = ceph_sync_write(iocb, &data, pos, snapc); 1341 if (written == -EOLDSNAPC) { 1342 dout("aio_write %p %llx.%llx %llu~%u" 1343 "got EOLDSNAPC, retrying\n", 1344 inode, ceph_vinop(inode), 1345 pos, (unsigned)count); 1346 inode_lock(inode); 1347 goto retry_snap; 1348 } 1349 if (written > 0) 1350 iov_iter_advance(from, written); 1351 ceph_put_snap_context(snapc); 1352 } else { 1353 loff_t old_size = i_size_read(inode); 1354 /* 1355 * No need to acquire the i_truncate_mutex. Because 1356 * the MDS revokes Fwb caps before sending truncate 1357 * message to us. We can't get Fwb cap while there 1358 * are pending vmtruncate. So write and vmtruncate 1359 * can not run at the same time 1360 */ 1361 written = generic_perform_write(file, from, pos); 1362 if (likely(written >= 0)) 1363 iocb->ki_pos = pos + written; 1364 if (i_size_read(inode) > old_size) 1365 ceph_fscache_update_objectsize(inode); 1366 inode_unlock(inode); 1367 } 1368 1369 if (written >= 0) { 1370 int dirty; 1371 spin_lock(&ci->i_ceph_lock); 1372 ci->i_inline_version = CEPH_INLINE_NONE; 1373 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1374 &prealloc_cf); 1375 spin_unlock(&ci->i_ceph_lock); 1376 if (dirty) 1377 __mark_inode_dirty(inode, dirty); 1378 } 1379 1380 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 1381 inode, ceph_vinop(inode), pos, (unsigned)count, 1382 ceph_cap_string(got)); 1383 ceph_put_cap_refs(ci, got); 1384 1385 if (written >= 0 && 1386 ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) || 1387 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { 1388 err = vfs_fsync_range(file, pos, pos + written - 1, 1); 1389 if (err < 0) 1390 written = err; 1391 } 1392 1393 goto out_unlocked; 1394 1395 out: 1396 inode_unlock(inode); 1397 out_unlocked: 1398 ceph_free_cap_flush(prealloc_cf); 1399 current->backing_dev_info = NULL; 1400 return written ? written : err; 1401 } 1402 1403 /* 1404 * llseek. be sure to verify file size on SEEK_END. 1405 */ 1406 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) 1407 { 1408 struct inode *inode = file->f_mapping->host; 1409 loff_t i_size; 1410 int ret; 1411 1412 inode_lock(inode); 1413 1414 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { 1415 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1416 if (ret < 0) { 1417 offset = ret; 1418 goto out; 1419 } 1420 } 1421 1422 i_size = i_size_read(inode); 1423 switch (whence) { 1424 case SEEK_END: 1425 offset += i_size; 1426 break; 1427 case SEEK_CUR: 1428 /* 1429 * Here we special-case the lseek(fd, 0, SEEK_CUR) 1430 * position-querying operation. Avoid rewriting the "same" 1431 * f_pos value back to the file because a concurrent read(), 1432 * write() or lseek() might have altered it 1433 */ 1434 if (offset == 0) { 1435 offset = file->f_pos; 1436 goto out; 1437 } 1438 offset += file->f_pos; 1439 break; 1440 case SEEK_DATA: 1441 if (offset >= i_size) { 1442 ret = -ENXIO; 1443 goto out; 1444 } 1445 break; 1446 case SEEK_HOLE: 1447 if (offset >= i_size) { 1448 ret = -ENXIO; 1449 goto out; 1450 } 1451 offset = i_size; 1452 break; 1453 } 1454 1455 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 1456 1457 out: 1458 inode_unlock(inode); 1459 return offset; 1460 } 1461 1462 static inline void ceph_zero_partial_page( 1463 struct inode *inode, loff_t offset, unsigned size) 1464 { 1465 struct page *page; 1466 pgoff_t index = offset >> PAGE_SHIFT; 1467 1468 page = find_lock_page(inode->i_mapping, index); 1469 if (page) { 1470 wait_on_page_writeback(page); 1471 zero_user(page, offset & (PAGE_SIZE - 1), size); 1472 unlock_page(page); 1473 put_page(page); 1474 } 1475 } 1476 1477 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 1478 loff_t length) 1479 { 1480 loff_t nearly = round_up(offset, PAGE_SIZE); 1481 if (offset < nearly) { 1482 loff_t size = nearly - offset; 1483 if (length < size) 1484 size = length; 1485 ceph_zero_partial_page(inode, offset, size); 1486 offset += size; 1487 length -= size; 1488 } 1489 if (length >= PAGE_SIZE) { 1490 loff_t size = round_down(length, PAGE_SIZE); 1491 truncate_pagecache_range(inode, offset, offset + size - 1); 1492 offset += size; 1493 length -= size; 1494 } 1495 if (length) 1496 ceph_zero_partial_page(inode, offset, length); 1497 } 1498 1499 static int ceph_zero_partial_object(struct inode *inode, 1500 loff_t offset, loff_t *length) 1501 { 1502 struct ceph_inode_info *ci = ceph_inode(inode); 1503 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1504 struct ceph_osd_request *req; 1505 int ret = 0; 1506 loff_t zero = 0; 1507 int op; 1508 1509 if (!length) { 1510 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; 1511 length = &zero; 1512 } else { 1513 op = CEPH_OSD_OP_ZERO; 1514 } 1515 1516 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1517 ceph_vino(inode), 1518 offset, length, 1519 0, 1, op, 1520 CEPH_OSD_FLAG_WRITE | 1521 CEPH_OSD_FLAG_ONDISK, 1522 NULL, 0, 0, false); 1523 if (IS_ERR(req)) { 1524 ret = PTR_ERR(req); 1525 goto out; 1526 } 1527 1528 ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap, 1529 &inode->i_mtime); 1530 1531 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1532 if (!ret) { 1533 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1534 if (ret == -ENOENT) 1535 ret = 0; 1536 } 1537 ceph_osdc_put_request(req); 1538 1539 out: 1540 return ret; 1541 } 1542 1543 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) 1544 { 1545 int ret = 0; 1546 struct ceph_inode_info *ci = ceph_inode(inode); 1547 s32 stripe_unit = ceph_file_layout_su(ci->i_layout); 1548 s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout); 1549 s32 object_size = ceph_file_layout_object_size(ci->i_layout); 1550 u64 object_set_size = object_size * stripe_count; 1551 u64 nearly, t; 1552 1553 /* round offset up to next period boundary */ 1554 nearly = offset + object_set_size - 1; 1555 t = nearly; 1556 nearly -= do_div(t, object_set_size); 1557 1558 while (length && offset < nearly) { 1559 loff_t size = length; 1560 ret = ceph_zero_partial_object(inode, offset, &size); 1561 if (ret < 0) 1562 return ret; 1563 offset += size; 1564 length -= size; 1565 } 1566 while (length >= object_set_size) { 1567 int i; 1568 loff_t pos = offset; 1569 for (i = 0; i < stripe_count; ++i) { 1570 ret = ceph_zero_partial_object(inode, pos, NULL); 1571 if (ret < 0) 1572 return ret; 1573 pos += stripe_unit; 1574 } 1575 offset += object_set_size; 1576 length -= object_set_size; 1577 } 1578 while (length) { 1579 loff_t size = length; 1580 ret = ceph_zero_partial_object(inode, offset, &size); 1581 if (ret < 0) 1582 return ret; 1583 offset += size; 1584 length -= size; 1585 } 1586 return ret; 1587 } 1588 1589 static long ceph_fallocate(struct file *file, int mode, 1590 loff_t offset, loff_t length) 1591 { 1592 struct ceph_file_info *fi = file->private_data; 1593 struct inode *inode = file_inode(file); 1594 struct ceph_inode_info *ci = ceph_inode(inode); 1595 struct ceph_osd_client *osdc = 1596 &ceph_inode_to_client(inode)->client->osdc; 1597 struct ceph_cap_flush *prealloc_cf; 1598 int want, got = 0; 1599 int dirty; 1600 int ret = 0; 1601 loff_t endoff = 0; 1602 loff_t size; 1603 1604 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 1605 return -EOPNOTSUPP; 1606 1607 if (!S_ISREG(inode->i_mode)) 1608 return -EOPNOTSUPP; 1609 1610 prealloc_cf = ceph_alloc_cap_flush(); 1611 if (!prealloc_cf) 1612 return -ENOMEM; 1613 1614 inode_lock(inode); 1615 1616 if (ceph_snap(inode) != CEPH_NOSNAP) { 1617 ret = -EROFS; 1618 goto unlock; 1619 } 1620 1621 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) && 1622 !(mode & FALLOC_FL_PUNCH_HOLE)) { 1623 ret = -ENOSPC; 1624 goto unlock; 1625 } 1626 1627 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1628 ret = ceph_uninline_data(file, NULL); 1629 if (ret < 0) 1630 goto unlock; 1631 } 1632 1633 size = i_size_read(inode); 1634 if (!(mode & FALLOC_FL_KEEP_SIZE)) 1635 endoff = offset + length; 1636 1637 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1638 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1639 else 1640 want = CEPH_CAP_FILE_BUFFER; 1641 1642 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL); 1643 if (ret < 0) 1644 goto unlock; 1645 1646 if (mode & FALLOC_FL_PUNCH_HOLE) { 1647 if (offset < size) 1648 ceph_zero_pagecache_range(inode, offset, length); 1649 ret = ceph_zero_objects(inode, offset, length); 1650 } else if (endoff > size) { 1651 truncate_pagecache_range(inode, size, -1); 1652 if (ceph_inode_set_size(inode, endoff)) 1653 ceph_check_caps(ceph_inode(inode), 1654 CHECK_CAPS_AUTHONLY, NULL); 1655 } 1656 1657 if (!ret) { 1658 spin_lock(&ci->i_ceph_lock); 1659 ci->i_inline_version = CEPH_INLINE_NONE; 1660 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1661 &prealloc_cf); 1662 spin_unlock(&ci->i_ceph_lock); 1663 if (dirty) 1664 __mark_inode_dirty(inode, dirty); 1665 } 1666 1667 ceph_put_cap_refs(ci, got); 1668 unlock: 1669 inode_unlock(inode); 1670 ceph_free_cap_flush(prealloc_cf); 1671 return ret; 1672 } 1673 1674 const struct file_operations ceph_file_fops = { 1675 .open = ceph_open, 1676 .release = ceph_release, 1677 .llseek = ceph_llseek, 1678 .read_iter = ceph_read_iter, 1679 .write_iter = ceph_write_iter, 1680 .mmap = ceph_mmap, 1681 .fsync = ceph_fsync, 1682 .lock = ceph_lock, 1683 .flock = ceph_flock, 1684 .splice_read = generic_file_splice_read, 1685 .splice_write = iter_file_splice_write, 1686 .unlocked_ioctl = ceph_ioctl, 1687 .compat_ioctl = ceph_ioctl, 1688 .fallocate = ceph_fallocate, 1689 }; 1690 1691