xref: /linux/fs/ceph/file.c (revision d195c39052d1da278a00a6744ce59c383b67b191)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 #include <linux/ceph/striper.h>
4 
5 #include <linux/module.h>
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/file.h>
9 #include <linux/mount.h>
10 #include <linux/namei.h>
11 #include <linux/writeback.h>
12 #include <linux/falloc.h>
13 #include <linux/iversion.h>
14 
15 #include "super.h"
16 #include "mds_client.h"
17 #include "cache.h"
18 #include "io.h"
19 
20 static __le32 ceph_flags_sys2wire(u32 flags)
21 {
22 	u32 wire_flags = 0;
23 
24 	switch (flags & O_ACCMODE) {
25 	case O_RDONLY:
26 		wire_flags |= CEPH_O_RDONLY;
27 		break;
28 	case O_WRONLY:
29 		wire_flags |= CEPH_O_WRONLY;
30 		break;
31 	case O_RDWR:
32 		wire_flags |= CEPH_O_RDWR;
33 		break;
34 	}
35 
36 	flags &= ~O_ACCMODE;
37 
38 #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
39 
40 	ceph_sys2wire(O_CREAT);
41 	ceph_sys2wire(O_EXCL);
42 	ceph_sys2wire(O_TRUNC);
43 	ceph_sys2wire(O_DIRECTORY);
44 	ceph_sys2wire(O_NOFOLLOW);
45 
46 #undef ceph_sys2wire
47 
48 	if (flags)
49 		dout("unused open flags: %x\n", flags);
50 
51 	return cpu_to_le32(wire_flags);
52 }
53 
54 /*
55  * Ceph file operations
56  *
57  * Implement basic open/close functionality, and implement
58  * read/write.
59  *
60  * We implement three modes of file I/O:
61  *  - buffered uses the generic_file_aio_{read,write} helpers
62  *
63  *  - synchronous is used when there is multi-client read/write
64  *    sharing, avoids the page cache, and synchronously waits for an
65  *    ack from the OSD.
66  *
67  *  - direct io takes the variant of the sync path that references
68  *    user pages directly.
69  *
70  * fsync() flushes and waits on dirty pages, but just queues metadata
71  * for writeback: since the MDS can recover size and mtime there is no
72  * need to wait for MDS acknowledgement.
73  */
74 
75 /*
76  * How many pages to get in one call to iov_iter_get_pages().  This
77  * determines the size of the on-stack array used as a buffer.
78  */
79 #define ITER_GET_BVECS_PAGES	64
80 
81 static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
82 				struct bio_vec *bvecs)
83 {
84 	size_t size = 0;
85 	int bvec_idx = 0;
86 
87 	if (maxsize > iov_iter_count(iter))
88 		maxsize = iov_iter_count(iter);
89 
90 	while (size < maxsize) {
91 		struct page *pages[ITER_GET_BVECS_PAGES];
92 		ssize_t bytes;
93 		size_t start;
94 		int idx = 0;
95 
96 		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
97 					   ITER_GET_BVECS_PAGES, &start);
98 		if (bytes < 0)
99 			return size ?: bytes;
100 
101 		iov_iter_advance(iter, bytes);
102 		size += bytes;
103 
104 		for ( ; bytes; idx++, bvec_idx++) {
105 			struct bio_vec bv = {
106 				.bv_page = pages[idx],
107 				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
108 				.bv_offset = start,
109 			};
110 
111 			bvecs[bvec_idx] = bv;
112 			bytes -= bv.bv_len;
113 			start = 0;
114 		}
115 	}
116 
117 	return size;
118 }
119 
120 /*
121  * iov_iter_get_pages() only considers one iov_iter segment, no matter
122  * what maxsize or maxpages are given.  For ITER_BVEC that is a single
123  * page.
124  *
125  * Attempt to get up to @maxsize bytes worth of pages from @iter.
126  * Return the number of bytes in the created bio_vec array, or an error.
127  */
128 static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
129 				    struct bio_vec **bvecs, int *num_bvecs)
130 {
131 	struct bio_vec *bv;
132 	size_t orig_count = iov_iter_count(iter);
133 	ssize_t bytes;
134 	int npages;
135 
136 	iov_iter_truncate(iter, maxsize);
137 	npages = iov_iter_npages(iter, INT_MAX);
138 	iov_iter_reexpand(iter, orig_count);
139 
140 	/*
141 	 * __iter_get_bvecs() may populate only part of the array -- zero it
142 	 * out.
143 	 */
144 	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
145 	if (!bv)
146 		return -ENOMEM;
147 
148 	bytes = __iter_get_bvecs(iter, maxsize, bv);
149 	if (bytes < 0) {
150 		/*
151 		 * No pages were pinned -- just free the array.
152 		 */
153 		kvfree(bv);
154 		return bytes;
155 	}
156 
157 	*bvecs = bv;
158 	*num_bvecs = npages;
159 	return bytes;
160 }
161 
162 static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
163 {
164 	int i;
165 
166 	for (i = 0; i < num_bvecs; i++) {
167 		if (bvecs[i].bv_page) {
168 			if (should_dirty)
169 				set_page_dirty_lock(bvecs[i].bv_page);
170 			put_page(bvecs[i].bv_page);
171 		}
172 	}
173 	kvfree(bvecs);
174 }
175 
176 /*
177  * Prepare an open request.  Preallocate ceph_cap to avoid an
178  * inopportune ENOMEM later.
179  */
180 static struct ceph_mds_request *
181 prepare_open_request(struct super_block *sb, int flags, int create_mode)
182 {
183 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
184 	struct ceph_mds_client *mdsc = fsc->mdsc;
185 	struct ceph_mds_request *req;
186 	int want_auth = USE_ANY_MDS;
187 	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
188 
189 	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
190 		want_auth = USE_AUTH_MDS;
191 
192 	req = ceph_mdsc_create_request(mdsc, op, want_auth);
193 	if (IS_ERR(req))
194 		goto out;
195 	req->r_fmode = ceph_flags_to_mode(flags);
196 	req->r_args.open.flags = ceph_flags_sys2wire(flags);
197 	req->r_args.open.mode = cpu_to_le32(create_mode);
198 out:
199 	return req;
200 }
201 
202 static int ceph_init_file_info(struct inode *inode, struct file *file,
203 					int fmode, bool isdir)
204 {
205 	struct ceph_inode_info *ci = ceph_inode(inode);
206 	struct ceph_file_info *fi;
207 
208 	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
209 			inode->i_mode, isdir ? "dir" : "regular");
210 	BUG_ON(inode->i_fop->release != ceph_release);
211 
212 	if (isdir) {
213 		struct ceph_dir_file_info *dfi =
214 			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
215 		if (!dfi)
216 			return -ENOMEM;
217 
218 		file->private_data = dfi;
219 		fi = &dfi->file_info;
220 		dfi->next_offset = 2;
221 		dfi->readdir_cache_idx = -1;
222 	} else {
223 		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
224 		if (!fi)
225 			return -ENOMEM;
226 
227 		file->private_data = fi;
228 	}
229 
230 	ceph_get_fmode(ci, fmode, 1);
231 	fi->fmode = fmode;
232 
233 	spin_lock_init(&fi->rw_contexts_lock);
234 	INIT_LIST_HEAD(&fi->rw_contexts);
235 	fi->meta_err = errseq_sample(&ci->i_meta_err);
236 	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
237 
238 	return 0;
239 }
240 
241 /*
242  * initialize private struct file data.
243  * if we fail, clean up by dropping fmode reference on the ceph_inode
244  */
245 static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
246 {
247 	int ret = 0;
248 
249 	switch (inode->i_mode & S_IFMT) {
250 	case S_IFREG:
251 		ceph_fscache_register_inode_cookie(inode);
252 		ceph_fscache_file_set_cookie(inode, file);
253 		/* fall through */
254 	case S_IFDIR:
255 		ret = ceph_init_file_info(inode, file, fmode,
256 						S_ISDIR(inode->i_mode));
257 		if (ret)
258 			return ret;
259 		break;
260 
261 	case S_IFLNK:
262 		dout("init_file %p %p 0%o (symlink)\n", inode, file,
263 		     inode->i_mode);
264 		break;
265 
266 	default:
267 		dout("init_file %p %p 0%o (special)\n", inode, file,
268 		     inode->i_mode);
269 		/*
270 		 * we need to drop the open ref now, since we don't
271 		 * have .release set to ceph_release.
272 		 */
273 		BUG_ON(inode->i_fop->release == ceph_release);
274 
275 		/* call the proper open fop */
276 		ret = inode->i_fop->open(inode, file);
277 	}
278 	return ret;
279 }
280 
281 /*
282  * try renew caps after session gets killed.
283  */
284 int ceph_renew_caps(struct inode *inode, int fmode)
285 {
286 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
287 	struct ceph_inode_info *ci = ceph_inode(inode);
288 	struct ceph_mds_request *req;
289 	int err, flags, wanted;
290 
291 	spin_lock(&ci->i_ceph_lock);
292 	__ceph_touch_fmode(ci, mdsc, fmode);
293 	wanted = __ceph_caps_file_wanted(ci);
294 	if (__ceph_is_any_real_caps(ci) &&
295 	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
296 		int issued = __ceph_caps_issued(ci, NULL);
297 		spin_unlock(&ci->i_ceph_lock);
298 		dout("renew caps %p want %s issued %s updating mds_wanted\n",
299 		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
300 		ceph_check_caps(ci, 0, NULL);
301 		return 0;
302 	}
303 	spin_unlock(&ci->i_ceph_lock);
304 
305 	flags = 0;
306 	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
307 		flags = O_RDWR;
308 	else if (wanted & CEPH_CAP_FILE_RD)
309 		flags = O_RDONLY;
310 	else if (wanted & CEPH_CAP_FILE_WR)
311 		flags = O_WRONLY;
312 #ifdef O_LAZY
313 	if (wanted & CEPH_CAP_FILE_LAZYIO)
314 		flags |= O_LAZY;
315 #endif
316 
317 	req = prepare_open_request(inode->i_sb, flags, 0);
318 	if (IS_ERR(req)) {
319 		err = PTR_ERR(req);
320 		goto out;
321 	}
322 
323 	req->r_inode = inode;
324 	ihold(inode);
325 	req->r_num_caps = 1;
326 
327 	err = ceph_mdsc_do_request(mdsc, NULL, req);
328 	ceph_mdsc_put_request(req);
329 out:
330 	dout("renew caps %p open result=%d\n", inode, err);
331 	return err < 0 ? err : 0;
332 }
333 
334 /*
335  * If we already have the requisite capabilities, we can satisfy
336  * the open request locally (no need to request new caps from the
337  * MDS).  We do, however, need to inform the MDS (asynchronously)
338  * if our wanted caps set expands.
339  */
340 int ceph_open(struct inode *inode, struct file *file)
341 {
342 	struct ceph_inode_info *ci = ceph_inode(inode);
343 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
344 	struct ceph_mds_client *mdsc = fsc->mdsc;
345 	struct ceph_mds_request *req;
346 	struct ceph_file_info *fi = file->private_data;
347 	int err;
348 	int flags, fmode, wanted;
349 
350 	if (fi) {
351 		dout("open file %p is already opened\n", file);
352 		return 0;
353 	}
354 
355 	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
356 	flags = file->f_flags & ~(O_CREAT|O_EXCL);
357 	if (S_ISDIR(inode->i_mode))
358 		flags = O_DIRECTORY;  /* mds likes to know */
359 
360 	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
361 	     ceph_vinop(inode), file, flags, file->f_flags);
362 	fmode = ceph_flags_to_mode(flags);
363 	wanted = ceph_caps_for_mode(fmode);
364 
365 	/* snapped files are read-only */
366 	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
367 		return -EROFS;
368 
369 	/* trivially open snapdir */
370 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
371 		return ceph_init_file(inode, file, fmode);
372 	}
373 
374 	/*
375 	 * No need to block if we have caps on the auth MDS (for
376 	 * write) or any MDS (for read).  Update wanted set
377 	 * asynchronously.
378 	 */
379 	spin_lock(&ci->i_ceph_lock);
380 	if (__ceph_is_any_real_caps(ci) &&
381 	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
382 		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
383 		int issued = __ceph_caps_issued(ci, NULL);
384 
385 		dout("open %p fmode %d want %s issued %s using existing\n",
386 		     inode, fmode, ceph_cap_string(wanted),
387 		     ceph_cap_string(issued));
388 		__ceph_touch_fmode(ci, mdsc, fmode);
389 		spin_unlock(&ci->i_ceph_lock);
390 
391 		/* adjust wanted? */
392 		if ((issued & wanted) != wanted &&
393 		    (mds_wanted & wanted) != wanted &&
394 		    ceph_snap(inode) != CEPH_SNAPDIR)
395 			ceph_check_caps(ci, 0, NULL);
396 
397 		return ceph_init_file(inode, file, fmode);
398 	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
399 		   (ci->i_snap_caps & wanted) == wanted) {
400 		__ceph_touch_fmode(ci, mdsc, fmode);
401 		spin_unlock(&ci->i_ceph_lock);
402 		return ceph_init_file(inode, file, fmode);
403 	}
404 
405 	spin_unlock(&ci->i_ceph_lock);
406 
407 	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
408 	req = prepare_open_request(inode->i_sb, flags, 0);
409 	if (IS_ERR(req)) {
410 		err = PTR_ERR(req);
411 		goto out;
412 	}
413 	req->r_inode = inode;
414 	ihold(inode);
415 
416 	req->r_num_caps = 1;
417 	err = ceph_mdsc_do_request(mdsc, NULL, req);
418 	if (!err)
419 		err = ceph_init_file(inode, file, req->r_fmode);
420 	ceph_mdsc_put_request(req);
421 	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
422 out:
423 	return err;
424 }
425 
426 /* Clone the layout from a synchronous create, if the dir now has Dc caps */
427 static void
428 cache_file_layout(struct inode *dst, struct inode *src)
429 {
430 	struct ceph_inode_info *cdst = ceph_inode(dst);
431 	struct ceph_inode_info *csrc = ceph_inode(src);
432 
433 	spin_lock(&cdst->i_ceph_lock);
434 	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
435 	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
436 		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
437 			sizeof(cdst->i_cached_layout));
438 		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
439 				   ceph_try_get_string(csrc->i_layout.pool_ns));
440 	}
441 	spin_unlock(&cdst->i_ceph_lock);
442 }
443 
444 /*
445  * Try to set up an async create. We need caps, a file layout, and inode number,
446  * and either a lease on the dentry or complete dir info. If any of those
447  * criteria are not satisfied, then return false and the caller can go
448  * synchronous.
449  */
450 static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
451 				 struct ceph_file_layout *lo, u64 *pino)
452 {
453 	struct ceph_inode_info *ci = ceph_inode(dir);
454 	struct ceph_dentry_info *di = ceph_dentry(dentry);
455 	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
456 	u64 ino;
457 
458 	spin_lock(&ci->i_ceph_lock);
459 	/* No auth cap means no chance for Dc caps */
460 	if (!ci->i_auth_cap)
461 		goto no_async;
462 
463 	/* Any delegated inos? */
464 	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
465 		goto no_async;
466 
467 	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
468 		goto no_async;
469 
470 	if ((__ceph_caps_issued(ci, NULL) & want) != want)
471 		goto no_async;
472 
473 	if (d_in_lookup(dentry)) {
474 		if (!__ceph_dir_is_complete(ci))
475 			goto no_async;
476 		spin_lock(&dentry->d_lock);
477 		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
478 		spin_unlock(&dentry->d_lock);
479 	} else if (atomic_read(&ci->i_shared_gen) !=
480 		   READ_ONCE(di->lease_shared_gen)) {
481 		goto no_async;
482 	}
483 
484 	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
485 	if (!ino)
486 		goto no_async;
487 
488 	*pino = ino;
489 	ceph_take_cap_refs(ci, want, false);
490 	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
491 	rcu_assign_pointer(lo->pool_ns,
492 			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
493 	got = want;
494 no_async:
495 	spin_unlock(&ci->i_ceph_lock);
496 	return got;
497 }
498 
499 static void restore_deleg_ino(struct inode *dir, u64 ino)
500 {
501 	struct ceph_inode_info *ci = ceph_inode(dir);
502 	struct ceph_mds_session *s = NULL;
503 
504 	spin_lock(&ci->i_ceph_lock);
505 	if (ci->i_auth_cap)
506 		s = ceph_get_mds_session(ci->i_auth_cap->session);
507 	spin_unlock(&ci->i_ceph_lock);
508 	if (s) {
509 		int err = ceph_restore_deleg_ino(s, ino);
510 		if (err)
511 			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
512 				ino, err);
513 		ceph_put_mds_session(s);
514 	}
515 }
516 
517 static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
518                                  struct ceph_mds_request *req)
519 {
520 	int result = req->r_err ? req->r_err :
521 			le32_to_cpu(req->r_reply_info.head->result);
522 
523 	if (result == -EJUKEBOX)
524 		goto out;
525 
526 	mapping_set_error(req->r_parent->i_mapping, result);
527 
528 	if (result) {
529 		struct dentry *dentry = req->r_dentry;
530 		int pathlen;
531 		u64 base;
532 		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
533 						  &base, 0);
534 
535 		ceph_dir_clear_complete(req->r_parent);
536 		if (!d_unhashed(dentry))
537 			d_drop(dentry);
538 
539 		/* FIXME: start returning I/O errors on all accesses? */
540 		pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
541 			base, IS_ERR(path) ? "<<bad>>" : path, result);
542 		ceph_mdsc_free_path(path, pathlen);
543 	}
544 
545 	if (req->r_target_inode) {
546 		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
547 		u64 ino = ceph_vino(req->r_target_inode).ino;
548 
549 		if (req->r_deleg_ino != ino)
550 			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
551 				__func__, req->r_err, req->r_deleg_ino, ino);
552 		mapping_set_error(req->r_target_inode->i_mapping, result);
553 
554 		spin_lock(&ci->i_ceph_lock);
555 		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
556 			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
557 			wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
558 		}
559 		ceph_kick_flushing_inode_caps(req->r_session, ci);
560 		spin_unlock(&ci->i_ceph_lock);
561 	} else {
562 		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
563 			req->r_deleg_ino);
564 	}
565 out:
566 	ceph_mdsc_release_dir_caps(req);
567 }
568 
569 static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
570 				    struct file *file, umode_t mode,
571 				    struct ceph_mds_request *req,
572 				    struct ceph_acl_sec_ctx *as_ctx,
573 				    struct ceph_file_layout *lo)
574 {
575 	int ret;
576 	char xattr_buf[4];
577 	struct ceph_mds_reply_inode in = { };
578 	struct ceph_mds_reply_info_in iinfo = { .in = &in };
579 	struct ceph_inode_info *ci = ceph_inode(dir);
580 	struct inode *inode;
581 	struct timespec64 now;
582 	struct ceph_vino vino = { .ino = req->r_deleg_ino,
583 				  .snap = CEPH_NOSNAP };
584 
585 	ktime_get_real_ts64(&now);
586 
587 	inode = ceph_get_inode(dentry->d_sb, vino);
588 	if (IS_ERR(inode))
589 		return PTR_ERR(inode);
590 
591 	iinfo.inline_version = CEPH_INLINE_NONE;
592 	iinfo.change_attr = 1;
593 	ceph_encode_timespec64(&iinfo.btime, &now);
594 
595 	iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
596 	iinfo.xattr_data = xattr_buf;
597 	memset(iinfo.xattr_data, 0, iinfo.xattr_len);
598 
599 	in.ino = cpu_to_le64(vino.ino);
600 	in.snapid = cpu_to_le64(CEPH_NOSNAP);
601 	in.version = cpu_to_le64(1);	// ???
602 	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
603 	in.cap.cap_id = cpu_to_le64(1);
604 	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
605 	in.cap.flags = CEPH_CAP_FLAG_AUTH;
606 	in.ctime = in.mtime = in.atime = iinfo.btime;
607 	in.mode = cpu_to_le32((u32)mode);
608 	in.truncate_seq = cpu_to_le32(1);
609 	in.truncate_size = cpu_to_le64(-1ULL);
610 	in.xattr_version = cpu_to_le64(1);
611 	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
612 	in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
613 				dir->i_gid : current_fsgid()));
614 	in.nlink = cpu_to_le32(1);
615 	in.max_size = cpu_to_le64(lo->stripe_unit);
616 
617 	ceph_file_layout_to_legacy(lo, &in.layout);
618 
619 	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
620 			      req->r_fmode, NULL);
621 	if (ret) {
622 		dout("%s failed to fill inode: %d\n", __func__, ret);
623 		ceph_dir_clear_complete(dir);
624 		if (!d_unhashed(dentry))
625 			d_drop(dentry);
626 		if (inode->i_state & I_NEW)
627 			discard_new_inode(inode);
628 	} else {
629 		struct dentry *dn;
630 
631 		dout("%s d_adding new inode 0x%llx to 0x%lx/%s\n", __func__,
632 			vino.ino, dir->i_ino, dentry->d_name.name);
633 		ceph_dir_clear_ordered(dir);
634 		ceph_init_inode_acls(inode, as_ctx);
635 		if (inode->i_state & I_NEW) {
636 			/*
637 			 * If it's not I_NEW, then someone created this before
638 			 * we got here. Assume the server is aware of it at
639 			 * that point and don't worry about setting
640 			 * CEPH_I_ASYNC_CREATE.
641 			 */
642 			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
643 			unlock_new_inode(inode);
644 		}
645 		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
646 			if (!d_unhashed(dentry))
647 				d_drop(dentry);
648 			dn = d_splice_alias(inode, dentry);
649 			WARN_ON_ONCE(dn && dn != dentry);
650 		}
651 		file->f_mode |= FMODE_CREATED;
652 		ret = finish_open(file, dentry, ceph_open);
653 	}
654 	return ret;
655 }
656 
657 /*
658  * Do a lookup + open with a single request.  If we get a non-existent
659  * file or symlink, return 1 so the VFS can retry.
660  */
661 int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
662 		     struct file *file, unsigned flags, umode_t mode)
663 {
664 	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
665 	struct ceph_mds_client *mdsc = fsc->mdsc;
666 	struct ceph_mds_request *req;
667 	struct dentry *dn;
668 	struct ceph_acl_sec_ctx as_ctx = {};
669 	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
670 	int mask;
671 	int err;
672 
673 	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
674 	     dir, dentry, dentry,
675 	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
676 
677 	if (dentry->d_name.len > NAME_MAX)
678 		return -ENAMETOOLONG;
679 
680 	if (flags & O_CREAT) {
681 		if (ceph_quota_is_max_files_exceeded(dir))
682 			return -EDQUOT;
683 		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
684 		if (err < 0)
685 			return err;
686 		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
687 		if (err < 0)
688 			goto out_ctx;
689 	} else if (!d_in_lookup(dentry)) {
690 		/* If it's not being looked up, it's negative */
691 		return -ENOENT;
692 	}
693 retry:
694 	/* do the open */
695 	req = prepare_open_request(dir->i_sb, flags, mode);
696 	if (IS_ERR(req)) {
697 		err = PTR_ERR(req);
698 		goto out_ctx;
699 	}
700 	req->r_dentry = dget(dentry);
701 	req->r_num_caps = 2;
702 	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
703 	if (ceph_security_xattr_wanted(dir))
704 		mask |= CEPH_CAP_XATTR_SHARED;
705 	req->r_args.open.mask = cpu_to_le32(mask);
706 	req->r_parent = dir;
707 
708 	if (flags & O_CREAT) {
709 		struct ceph_file_layout lo;
710 
711 		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
712 		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
713 		if (as_ctx.pagelist) {
714 			req->r_pagelist = as_ctx.pagelist;
715 			as_ctx.pagelist = NULL;
716 		}
717 		if (try_async &&
718 		    (req->r_dir_caps =
719 		      try_prep_async_create(dir, dentry, &lo,
720 					    &req->r_deleg_ino))) {
721 			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
722 			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
723 			req->r_callback = ceph_async_create_cb;
724 			err = ceph_mdsc_submit_request(mdsc, dir, req);
725 			if (!err) {
726 				err = ceph_finish_async_create(dir, dentry,
727 							file, mode, req,
728 							&as_ctx, &lo);
729 			} else if (err == -EJUKEBOX) {
730 				restore_deleg_ino(dir, req->r_deleg_ino);
731 				ceph_mdsc_put_request(req);
732 				try_async = false;
733 				goto retry;
734 			}
735 			goto out_req;
736 		}
737 	}
738 
739 	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
740 	err = ceph_mdsc_do_request(mdsc,
741 				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
742 				   req);
743 	err = ceph_handle_snapdir(req, dentry, err);
744 	if (err)
745 		goto out_req;
746 
747 	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
748 		err = ceph_handle_notrace_create(dir, dentry);
749 
750 	if (d_in_lookup(dentry)) {
751 		dn = ceph_finish_lookup(req, dentry, err);
752 		if (IS_ERR(dn))
753 			err = PTR_ERR(dn);
754 	} else {
755 		/* we were given a hashed negative dentry */
756 		dn = NULL;
757 	}
758 	if (err)
759 		goto out_req;
760 	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
761 		/* make vfs retry on splice, ENOENT, or symlink */
762 		dout("atomic_open finish_no_open on dn %p\n", dn);
763 		err = finish_no_open(file, dn);
764 	} else {
765 		dout("atomic_open finish_open on dn %p\n", dn);
766 		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
767 			struct inode *newino = d_inode(dentry);
768 
769 			cache_file_layout(dir, newino);
770 			ceph_init_inode_acls(newino, &as_ctx);
771 			file->f_mode |= FMODE_CREATED;
772 		}
773 		err = finish_open(file, dentry, ceph_open);
774 	}
775 out_req:
776 	ceph_mdsc_put_request(req);
777 out_ctx:
778 	ceph_release_acl_sec_ctx(&as_ctx);
779 	dout("atomic_open result=%d\n", err);
780 	return err;
781 }
782 
783 int ceph_release(struct inode *inode, struct file *file)
784 {
785 	struct ceph_inode_info *ci = ceph_inode(inode);
786 
787 	if (S_ISDIR(inode->i_mode)) {
788 		struct ceph_dir_file_info *dfi = file->private_data;
789 		dout("release inode %p dir file %p\n", inode, file);
790 		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
791 
792 		ceph_put_fmode(ci, dfi->file_info.fmode, 1);
793 
794 		if (dfi->last_readdir)
795 			ceph_mdsc_put_request(dfi->last_readdir);
796 		kfree(dfi->last_name);
797 		kfree(dfi->dir_info);
798 		kmem_cache_free(ceph_dir_file_cachep, dfi);
799 	} else {
800 		struct ceph_file_info *fi = file->private_data;
801 		dout("release inode %p regular file %p\n", inode, file);
802 		WARN_ON(!list_empty(&fi->rw_contexts));
803 
804 		ceph_put_fmode(ci, fi->fmode, 1);
805 
806 		kmem_cache_free(ceph_file_cachep, fi);
807 	}
808 
809 	/* wake up anyone waiting for caps on this inode */
810 	wake_up_all(&ci->i_cap_wq);
811 	return 0;
812 }
813 
814 enum {
815 	HAVE_RETRIED = 1,
816 	CHECK_EOF =    2,
817 	READ_INLINE =  3,
818 };
819 
820 /*
821  * Completely synchronous read and write methods.  Direct from __user
822  * buffer to osd, or directly to user pages (if O_DIRECT).
823  *
824  * If the read spans object boundary, just do multiple reads.  (That's not
825  * atomic, but good enough for now.)
826  *
827  * If we get a short result from the OSD, check against i_size; we need to
828  * only return a short read to the caller if we hit EOF.
829  */
830 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
831 			      int *retry_op)
832 {
833 	struct file *file = iocb->ki_filp;
834 	struct inode *inode = file_inode(file);
835 	struct ceph_inode_info *ci = ceph_inode(inode);
836 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
837 	struct ceph_osd_client *osdc = &fsc->client->osdc;
838 	ssize_t ret;
839 	u64 off = iocb->ki_pos;
840 	u64 len = iov_iter_count(to);
841 
842 	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
843 	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
844 
845 	if (!len)
846 		return 0;
847 	/*
848 	 * flush any page cache pages in this range.  this
849 	 * will make concurrent normal and sync io slow,
850 	 * but it will at least behave sensibly when they are
851 	 * in sequence.
852 	 */
853 	ret = filemap_write_and_wait_range(inode->i_mapping,
854 					   off, off + len - 1);
855 	if (ret < 0)
856 		return ret;
857 
858 	ret = 0;
859 	while ((len = iov_iter_count(to)) > 0) {
860 		struct ceph_osd_request *req;
861 		struct page **pages;
862 		int num_pages;
863 		size_t page_off;
864 		u64 i_size;
865 		bool more;
866 
867 		req = ceph_osdc_new_request(osdc, &ci->i_layout,
868 					ci->i_vino, off, &len, 0, 1,
869 					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
870 					NULL, ci->i_truncate_seq,
871 					ci->i_truncate_size, false);
872 		if (IS_ERR(req)) {
873 			ret = PTR_ERR(req);
874 			break;
875 		}
876 
877 		more = len < iov_iter_count(to);
878 
879 		if (unlikely(iov_iter_is_pipe(to))) {
880 			ret = iov_iter_get_pages_alloc(to, &pages, len,
881 						       &page_off);
882 			if (ret <= 0) {
883 				ceph_osdc_put_request(req);
884 				ret = -ENOMEM;
885 				break;
886 			}
887 			num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
888 			if (ret < len) {
889 				len = ret;
890 				osd_req_op_extent_update(req, 0, len);
891 				more = false;
892 			}
893 		} else {
894 			num_pages = calc_pages_for(off, len);
895 			page_off = off & ~PAGE_MASK;
896 			pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
897 			if (IS_ERR(pages)) {
898 				ceph_osdc_put_request(req);
899 				ret = PTR_ERR(pages);
900 				break;
901 			}
902 		}
903 
904 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
905 						 false, false);
906 		ret = ceph_osdc_start_request(osdc, req, false);
907 		if (!ret)
908 			ret = ceph_osdc_wait_request(osdc, req);
909 		ceph_osdc_put_request(req);
910 
911 		i_size = i_size_read(inode);
912 		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
913 		     off, len, ret, i_size, (more ? " MORE" : ""));
914 
915 		if (ret == -ENOENT)
916 			ret = 0;
917 		if (ret >= 0 && ret < len && (off + ret < i_size)) {
918 			int zlen = min(len - ret, i_size - off - ret);
919 			int zoff = page_off + ret;
920 			dout("sync_read zero gap %llu~%llu\n",
921                              off + ret, off + ret + zlen);
922 			ceph_zero_page_vector_range(zoff, zlen, pages);
923 			ret += zlen;
924 		}
925 
926 		if (unlikely(iov_iter_is_pipe(to))) {
927 			if (ret > 0) {
928 				iov_iter_advance(to, ret);
929 				off += ret;
930 			} else {
931 				iov_iter_advance(to, 0);
932 			}
933 			ceph_put_page_vector(pages, num_pages, false);
934 		} else {
935 			int idx = 0;
936 			size_t left = ret > 0 ? ret : 0;
937 			while (left > 0) {
938 				size_t len, copied;
939 				page_off = off & ~PAGE_MASK;
940 				len = min_t(size_t, left, PAGE_SIZE - page_off);
941 				copied = copy_page_to_iter(pages[idx++],
942 							   page_off, len, to);
943 				off += copied;
944 				left -= copied;
945 				if (copied < len) {
946 					ret = -EFAULT;
947 					break;
948 				}
949 			}
950 			ceph_release_page_vector(pages, num_pages);
951 		}
952 
953 		if (ret < 0) {
954 			if (ret == -EBLACKLISTED)
955 				fsc->blacklisted = true;
956 			break;
957 		}
958 
959 		if (off >= i_size || !more)
960 			break;
961 	}
962 
963 	if (off > iocb->ki_pos) {
964 		if (ret >= 0 &&
965 		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
966 			*retry_op = CHECK_EOF;
967 		ret = off - iocb->ki_pos;
968 		iocb->ki_pos = off;
969 	}
970 
971 	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
972 	return ret;
973 }
974 
975 struct ceph_aio_request {
976 	struct kiocb *iocb;
977 	size_t total_len;
978 	bool write;
979 	bool should_dirty;
980 	int error;
981 	struct list_head osd_reqs;
982 	unsigned num_reqs;
983 	atomic_t pending_reqs;
984 	struct timespec64 mtime;
985 	struct ceph_cap_flush *prealloc_cf;
986 };
987 
988 struct ceph_aio_work {
989 	struct work_struct work;
990 	struct ceph_osd_request *req;
991 };
992 
993 static void ceph_aio_retry_work(struct work_struct *work);
994 
995 static void ceph_aio_complete(struct inode *inode,
996 			      struct ceph_aio_request *aio_req)
997 {
998 	struct ceph_inode_info *ci = ceph_inode(inode);
999 	int ret;
1000 
1001 	if (!atomic_dec_and_test(&aio_req->pending_reqs))
1002 		return;
1003 
1004 	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
1005 		inode_dio_end(inode);
1006 
1007 	ret = aio_req->error;
1008 	if (!ret)
1009 		ret = aio_req->total_len;
1010 
1011 	dout("ceph_aio_complete %p rc %d\n", inode, ret);
1012 
1013 	if (ret >= 0 && aio_req->write) {
1014 		int dirty;
1015 
1016 		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
1017 		if (endoff > i_size_read(inode)) {
1018 			if (ceph_inode_set_size(inode, endoff))
1019 				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1020 		}
1021 
1022 		spin_lock(&ci->i_ceph_lock);
1023 		ci->i_inline_version = CEPH_INLINE_NONE;
1024 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1025 					       &aio_req->prealloc_cf);
1026 		spin_unlock(&ci->i_ceph_lock);
1027 		if (dirty)
1028 			__mark_inode_dirty(inode, dirty);
1029 
1030 	}
1031 
1032 	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
1033 						CEPH_CAP_FILE_RD));
1034 
1035 	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
1036 
1037 	ceph_free_cap_flush(aio_req->prealloc_cf);
1038 	kfree(aio_req);
1039 }
1040 
1041 static void ceph_aio_complete_req(struct ceph_osd_request *req)
1042 {
1043 	int rc = req->r_result;
1044 	struct inode *inode = req->r_inode;
1045 	struct ceph_aio_request *aio_req = req->r_priv;
1046 	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
1047 
1048 	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
1049 	BUG_ON(!osd_data->num_bvecs);
1050 
1051 	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
1052 	     inode, rc, osd_data->bvec_pos.iter.bi_size);
1053 
1054 	if (rc == -EOLDSNAPC) {
1055 		struct ceph_aio_work *aio_work;
1056 		BUG_ON(!aio_req->write);
1057 
1058 		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
1059 		if (aio_work) {
1060 			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
1061 			aio_work->req = req;
1062 			queue_work(ceph_inode_to_client(inode)->inode_wq,
1063 				   &aio_work->work);
1064 			return;
1065 		}
1066 		rc = -ENOMEM;
1067 	} else if (!aio_req->write) {
1068 		if (rc == -ENOENT)
1069 			rc = 0;
1070 		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
1071 			struct iov_iter i;
1072 			int zlen = osd_data->bvec_pos.iter.bi_size - rc;
1073 
1074 			/*
1075 			 * If read is satisfied by single OSD request,
1076 			 * it can pass EOF. Otherwise read is within
1077 			 * i_size.
1078 			 */
1079 			if (aio_req->num_reqs == 1) {
1080 				loff_t i_size = i_size_read(inode);
1081 				loff_t endoff = aio_req->iocb->ki_pos + rc;
1082 				if (endoff < i_size)
1083 					zlen = min_t(size_t, zlen,
1084 						     i_size - endoff);
1085 				aio_req->total_len = rc + zlen;
1086 			}
1087 
1088 			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
1089 				      osd_data->num_bvecs,
1090 				      osd_data->bvec_pos.iter.bi_size);
1091 			iov_iter_advance(&i, rc);
1092 			iov_iter_zero(zlen, &i);
1093 		}
1094 	}
1095 
1096 	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
1097 		  aio_req->should_dirty);
1098 	ceph_osdc_put_request(req);
1099 
1100 	if (rc < 0)
1101 		cmpxchg(&aio_req->error, 0, rc);
1102 
1103 	ceph_aio_complete(inode, aio_req);
1104 	return;
1105 }
1106 
1107 static void ceph_aio_retry_work(struct work_struct *work)
1108 {
1109 	struct ceph_aio_work *aio_work =
1110 		container_of(work, struct ceph_aio_work, work);
1111 	struct ceph_osd_request *orig_req = aio_work->req;
1112 	struct ceph_aio_request *aio_req = orig_req->r_priv;
1113 	struct inode *inode = orig_req->r_inode;
1114 	struct ceph_inode_info *ci = ceph_inode(inode);
1115 	struct ceph_snap_context *snapc;
1116 	struct ceph_osd_request *req;
1117 	int ret;
1118 
1119 	spin_lock(&ci->i_ceph_lock);
1120 	if (__ceph_have_pending_cap_snap(ci)) {
1121 		struct ceph_cap_snap *capsnap =
1122 			list_last_entry(&ci->i_cap_snaps,
1123 					struct ceph_cap_snap,
1124 					ci_item);
1125 		snapc = ceph_get_snap_context(capsnap->context);
1126 	} else {
1127 		BUG_ON(!ci->i_head_snapc);
1128 		snapc = ceph_get_snap_context(ci->i_head_snapc);
1129 	}
1130 	spin_unlock(&ci->i_ceph_lock);
1131 
1132 	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
1133 			false, GFP_NOFS);
1134 	if (!req) {
1135 		ret = -ENOMEM;
1136 		req = orig_req;
1137 		goto out;
1138 	}
1139 
1140 	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1141 	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
1142 	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
1143 
1144 	req->r_ops[0] = orig_req->r_ops[0];
1145 
1146 	req->r_mtime = aio_req->mtime;
1147 	req->r_data_offset = req->r_ops[0].extent.offset;
1148 
1149 	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
1150 	if (ret) {
1151 		ceph_osdc_put_request(req);
1152 		req = orig_req;
1153 		goto out;
1154 	}
1155 
1156 	ceph_osdc_put_request(orig_req);
1157 
1158 	req->r_callback = ceph_aio_complete_req;
1159 	req->r_inode = inode;
1160 	req->r_priv = aio_req;
1161 
1162 	ret = ceph_osdc_start_request(req->r_osdc, req, false);
1163 out:
1164 	if (ret < 0) {
1165 		req->r_result = ret;
1166 		ceph_aio_complete_req(req);
1167 	}
1168 
1169 	ceph_put_snap_context(snapc);
1170 	kfree(aio_work);
1171 }
1172 
1173 static ssize_t
1174 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1175 		       struct ceph_snap_context *snapc,
1176 		       struct ceph_cap_flush **pcf)
1177 {
1178 	struct file *file = iocb->ki_filp;
1179 	struct inode *inode = file_inode(file);
1180 	struct ceph_inode_info *ci = ceph_inode(inode);
1181 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1182 	struct ceph_vino vino;
1183 	struct ceph_osd_request *req;
1184 	struct bio_vec *bvecs;
1185 	struct ceph_aio_request *aio_req = NULL;
1186 	int num_pages = 0;
1187 	int flags;
1188 	int ret = 0;
1189 	struct timespec64 mtime = current_time(inode);
1190 	size_t count = iov_iter_count(iter);
1191 	loff_t pos = iocb->ki_pos;
1192 	bool write = iov_iter_rw(iter) == WRITE;
1193 	bool should_dirty = !write && iter_is_iovec(iter);
1194 
1195 	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1196 		return -EROFS;
1197 
1198 	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
1199 	     (write ? "write" : "read"), file, pos, (unsigned)count,
1200 	     snapc, snapc ? snapc->seq : 0);
1201 
1202 	if (write) {
1203 		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
1204 					pos >> PAGE_SHIFT,
1205 					(pos + count - 1) >> PAGE_SHIFT);
1206 		if (ret2 < 0)
1207 			dout("invalidate_inode_pages2_range returned %d\n", ret2);
1208 
1209 		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1210 	} else {
1211 		flags = CEPH_OSD_FLAG_READ;
1212 	}
1213 
1214 	while (iov_iter_count(iter) > 0) {
1215 		u64 size = iov_iter_count(iter);
1216 		ssize_t len;
1217 
1218 		if (write)
1219 			size = min_t(u64, size, fsc->mount_options->wsize);
1220 		else
1221 			size = min_t(u64, size, fsc->mount_options->rsize);
1222 
1223 		vino = ceph_vino(inode);
1224 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1225 					    vino, pos, &size, 0,
1226 					    1,
1227 					    write ? CEPH_OSD_OP_WRITE :
1228 						    CEPH_OSD_OP_READ,
1229 					    flags, snapc,
1230 					    ci->i_truncate_seq,
1231 					    ci->i_truncate_size,
1232 					    false);
1233 		if (IS_ERR(req)) {
1234 			ret = PTR_ERR(req);
1235 			break;
1236 		}
1237 
1238 		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1239 		if (len < 0) {
1240 			ceph_osdc_put_request(req);
1241 			ret = len;
1242 			break;
1243 		}
1244 		if (len != size)
1245 			osd_req_op_extent_update(req, 0, len);
1246 
1247 		/*
1248 		 * To simplify error handling, allow AIO when IO within i_size
1249 		 * or IO can be satisfied by single OSD request.
1250 		 */
1251 		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1252 		    (len == count || pos + count <= i_size_read(inode))) {
1253 			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1254 			if (aio_req) {
1255 				aio_req->iocb = iocb;
1256 				aio_req->write = write;
1257 				aio_req->should_dirty = should_dirty;
1258 				INIT_LIST_HEAD(&aio_req->osd_reqs);
1259 				if (write) {
1260 					aio_req->mtime = mtime;
1261 					swap(aio_req->prealloc_cf, *pcf);
1262 				}
1263 			}
1264 			/* ignore error */
1265 		}
1266 
1267 		if (write) {
1268 			/*
1269 			 * throw out any page cache pages in this range. this
1270 			 * may block.
1271 			 */
1272 			truncate_inode_pages_range(inode->i_mapping, pos,
1273 						   PAGE_ALIGN(pos + len) - 1);
1274 
1275 			req->r_mtime = mtime;
1276 		}
1277 
1278 		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1279 
1280 		if (aio_req) {
1281 			aio_req->total_len += len;
1282 			aio_req->num_reqs++;
1283 			atomic_inc(&aio_req->pending_reqs);
1284 
1285 			req->r_callback = ceph_aio_complete_req;
1286 			req->r_inode = inode;
1287 			req->r_priv = aio_req;
1288 			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1289 
1290 			pos += len;
1291 			continue;
1292 		}
1293 
1294 		ret = ceph_osdc_start_request(req->r_osdc, req, false);
1295 		if (!ret)
1296 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1297 
1298 		size = i_size_read(inode);
1299 		if (!write) {
1300 			if (ret == -ENOENT)
1301 				ret = 0;
1302 			if (ret >= 0 && ret < len && pos + ret < size) {
1303 				struct iov_iter i;
1304 				int zlen = min_t(size_t, len - ret,
1305 						 size - pos - ret);
1306 
1307 				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
1308 				iov_iter_advance(&i, ret);
1309 				iov_iter_zero(zlen, &i);
1310 				ret += zlen;
1311 			}
1312 			if (ret >= 0)
1313 				len = ret;
1314 		}
1315 
1316 		put_bvecs(bvecs, num_pages, should_dirty);
1317 		ceph_osdc_put_request(req);
1318 		if (ret < 0)
1319 			break;
1320 
1321 		pos += len;
1322 		if (!write && pos >= size)
1323 			break;
1324 
1325 		if (write && pos > size) {
1326 			if (ceph_inode_set_size(inode, pos))
1327 				ceph_check_caps(ceph_inode(inode),
1328 						CHECK_CAPS_AUTHONLY,
1329 						NULL);
1330 		}
1331 	}
1332 
1333 	if (aio_req) {
1334 		LIST_HEAD(osd_reqs);
1335 
1336 		if (aio_req->num_reqs == 0) {
1337 			kfree(aio_req);
1338 			return ret;
1339 		}
1340 
1341 		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1342 					      CEPH_CAP_FILE_RD);
1343 
1344 		list_splice(&aio_req->osd_reqs, &osd_reqs);
1345 		inode_dio_begin(inode);
1346 		while (!list_empty(&osd_reqs)) {
1347 			req = list_first_entry(&osd_reqs,
1348 					       struct ceph_osd_request,
1349 					       r_private_item);
1350 			list_del_init(&req->r_private_item);
1351 			if (ret >= 0)
1352 				ret = ceph_osdc_start_request(req->r_osdc,
1353 							      req, false);
1354 			if (ret < 0) {
1355 				req->r_result = ret;
1356 				ceph_aio_complete_req(req);
1357 			}
1358 		}
1359 		return -EIOCBQUEUED;
1360 	}
1361 
1362 	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1363 		ret = pos - iocb->ki_pos;
1364 		iocb->ki_pos = pos;
1365 	}
1366 	return ret;
1367 }
1368 
1369 /*
1370  * Synchronous write, straight from __user pointer or user pages.
1371  *
1372  * If write spans object boundary, just do multiple writes.  (For a
1373  * correct atomic write, we should e.g. take write locks on all
1374  * objects, rollback on failure, etc.)
1375  */
1376 static ssize_t
1377 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1378 		struct ceph_snap_context *snapc)
1379 {
1380 	struct file *file = iocb->ki_filp;
1381 	struct inode *inode = file_inode(file);
1382 	struct ceph_inode_info *ci = ceph_inode(inode);
1383 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1384 	struct ceph_vino vino;
1385 	struct ceph_osd_request *req;
1386 	struct page **pages;
1387 	u64 len;
1388 	int num_pages;
1389 	int written = 0;
1390 	int flags;
1391 	int ret;
1392 	bool check_caps = false;
1393 	struct timespec64 mtime = current_time(inode);
1394 	size_t count = iov_iter_count(from);
1395 
1396 	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1397 		return -EROFS;
1398 
1399 	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1400 	     file, pos, (unsigned)count, snapc, snapc->seq);
1401 
1402 	ret = filemap_write_and_wait_range(inode->i_mapping,
1403 					   pos, pos + count - 1);
1404 	if (ret < 0)
1405 		return ret;
1406 
1407 	ret = invalidate_inode_pages2_range(inode->i_mapping,
1408 					    pos >> PAGE_SHIFT,
1409 					    (pos + count - 1) >> PAGE_SHIFT);
1410 	if (ret < 0)
1411 		dout("invalidate_inode_pages2_range returned %d\n", ret);
1412 
1413 	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1414 
1415 	while ((len = iov_iter_count(from)) > 0) {
1416 		size_t left;
1417 		int n;
1418 
1419 		vino = ceph_vino(inode);
1420 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1421 					    vino, pos, &len, 0, 1,
1422 					    CEPH_OSD_OP_WRITE, flags, snapc,
1423 					    ci->i_truncate_seq,
1424 					    ci->i_truncate_size,
1425 					    false);
1426 		if (IS_ERR(req)) {
1427 			ret = PTR_ERR(req);
1428 			break;
1429 		}
1430 
1431 		/*
1432 		 * write from beginning of first page,
1433 		 * regardless of io alignment
1434 		 */
1435 		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1436 
1437 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1438 		if (IS_ERR(pages)) {
1439 			ret = PTR_ERR(pages);
1440 			goto out;
1441 		}
1442 
1443 		left = len;
1444 		for (n = 0; n < num_pages; n++) {
1445 			size_t plen = min_t(size_t, left, PAGE_SIZE);
1446 			ret = copy_page_from_iter(pages[n], 0, plen, from);
1447 			if (ret != plen) {
1448 				ret = -EFAULT;
1449 				break;
1450 			}
1451 			left -= ret;
1452 		}
1453 
1454 		if (ret < 0) {
1455 			ceph_release_page_vector(pages, num_pages);
1456 			goto out;
1457 		}
1458 
1459 		req->r_inode = inode;
1460 
1461 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1462 						false, true);
1463 
1464 		req->r_mtime = mtime;
1465 		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1466 		if (!ret)
1467 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1468 
1469 out:
1470 		ceph_osdc_put_request(req);
1471 		if (ret != 0) {
1472 			ceph_set_error_write(ci);
1473 			break;
1474 		}
1475 
1476 		ceph_clear_error_write(ci);
1477 		pos += len;
1478 		written += len;
1479 		if (pos > i_size_read(inode)) {
1480 			check_caps = ceph_inode_set_size(inode, pos);
1481 			if (check_caps)
1482 				ceph_check_caps(ceph_inode(inode),
1483 						CHECK_CAPS_AUTHONLY,
1484 						NULL);
1485 		}
1486 
1487 	}
1488 
1489 	if (ret != -EOLDSNAPC && written > 0) {
1490 		ret = written;
1491 		iocb->ki_pos = pos;
1492 	}
1493 	return ret;
1494 }
1495 
1496 /*
1497  * Wrap generic_file_aio_read with checks for cap bits on the inode.
1498  * Atomically grab references, so that those bits are not released
1499  * back to the MDS mid-read.
1500  *
1501  * Hmm, the sync read case isn't actually async... should it be?
1502  */
1503 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1504 {
1505 	struct file *filp = iocb->ki_filp;
1506 	struct ceph_file_info *fi = filp->private_data;
1507 	size_t len = iov_iter_count(to);
1508 	struct inode *inode = file_inode(filp);
1509 	struct ceph_inode_info *ci = ceph_inode(inode);
1510 	struct page *pinned_page = NULL;
1511 	ssize_t ret;
1512 	int want, got = 0;
1513 	int retry_op = 0, read = 0;
1514 
1515 again:
1516 	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1517 	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1518 
1519 	if (iocb->ki_flags & IOCB_DIRECT)
1520 		ceph_start_io_direct(inode);
1521 	else
1522 		ceph_start_io_read(inode);
1523 
1524 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1525 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1526 	else
1527 		want = CEPH_CAP_FILE_CACHE;
1528 	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
1529 			    &got, &pinned_page);
1530 	if (ret < 0) {
1531 		if (iocb->ki_flags & IOCB_DIRECT)
1532 			ceph_end_io_direct(inode);
1533 		else
1534 			ceph_end_io_read(inode);
1535 		return ret;
1536 	}
1537 
1538 	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1539 	    (iocb->ki_flags & IOCB_DIRECT) ||
1540 	    (fi->flags & CEPH_F_SYNC)) {
1541 
1542 		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1543 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1544 		     ceph_cap_string(got));
1545 
1546 		if (ci->i_inline_version == CEPH_INLINE_NONE) {
1547 			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1548 				ret = ceph_direct_read_write(iocb, to,
1549 							     NULL, NULL);
1550 				if (ret >= 0 && ret < len)
1551 					retry_op = CHECK_EOF;
1552 			} else {
1553 				ret = ceph_sync_read(iocb, to, &retry_op);
1554 			}
1555 		} else {
1556 			retry_op = READ_INLINE;
1557 		}
1558 	} else {
1559 		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1560 		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1561 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1562 		     ceph_cap_string(got));
1563 		ceph_add_rw_context(fi, &rw_ctx);
1564 		ret = generic_file_read_iter(iocb, to);
1565 		ceph_del_rw_context(fi, &rw_ctx);
1566 	}
1567 
1568 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1569 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1570 	if (pinned_page) {
1571 		put_page(pinned_page);
1572 		pinned_page = NULL;
1573 	}
1574 	ceph_put_cap_refs(ci, got);
1575 
1576 	if (iocb->ki_flags & IOCB_DIRECT)
1577 		ceph_end_io_direct(inode);
1578 	else
1579 		ceph_end_io_read(inode);
1580 
1581 	if (retry_op > HAVE_RETRIED && ret >= 0) {
1582 		int statret;
1583 		struct page *page = NULL;
1584 		loff_t i_size;
1585 		if (retry_op == READ_INLINE) {
1586 			page = __page_cache_alloc(GFP_KERNEL);
1587 			if (!page)
1588 				return -ENOMEM;
1589 		}
1590 
1591 		statret = __ceph_do_getattr(inode, page,
1592 					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1593 		if (statret < 0) {
1594 			if (page)
1595 				__free_page(page);
1596 			if (statret == -ENODATA) {
1597 				BUG_ON(retry_op != READ_INLINE);
1598 				goto again;
1599 			}
1600 			return statret;
1601 		}
1602 
1603 		i_size = i_size_read(inode);
1604 		if (retry_op == READ_INLINE) {
1605 			BUG_ON(ret > 0 || read > 0);
1606 			if (iocb->ki_pos < i_size &&
1607 			    iocb->ki_pos < PAGE_SIZE) {
1608 				loff_t end = min_t(loff_t, i_size,
1609 						   iocb->ki_pos + len);
1610 				end = min_t(loff_t, end, PAGE_SIZE);
1611 				if (statret < end)
1612 					zero_user_segment(page, statret, end);
1613 				ret = copy_page_to_iter(page,
1614 						iocb->ki_pos & ~PAGE_MASK,
1615 						end - iocb->ki_pos, to);
1616 				iocb->ki_pos += ret;
1617 				read += ret;
1618 			}
1619 			if (iocb->ki_pos < i_size && read < len) {
1620 				size_t zlen = min_t(size_t, len - read,
1621 						    i_size - iocb->ki_pos);
1622 				ret = iov_iter_zero(zlen, to);
1623 				iocb->ki_pos += ret;
1624 				read += ret;
1625 			}
1626 			__free_pages(page, 0);
1627 			return read;
1628 		}
1629 
1630 		/* hit EOF or hole? */
1631 		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1632 		    ret < len) {
1633 			dout("sync_read hit hole, ppos %lld < size %lld"
1634 			     ", reading more\n", iocb->ki_pos, i_size);
1635 
1636 			read += ret;
1637 			len -= ret;
1638 			retry_op = HAVE_RETRIED;
1639 			goto again;
1640 		}
1641 	}
1642 
1643 	if (ret >= 0)
1644 		ret += read;
1645 
1646 	return ret;
1647 }
1648 
1649 /*
1650  * Take cap references to avoid releasing caps to MDS mid-write.
1651  *
1652  * If we are synchronous, and write with an old snap context, the OSD
1653  * may return EOLDSNAPC.  In that case, retry the write.. _after_
1654  * dropping our cap refs and allowing the pending snap to logically
1655  * complete _before_ this write occurs.
1656  *
1657  * If we are near ENOSPC, write synchronously.
1658  */
1659 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1660 {
1661 	struct file *file = iocb->ki_filp;
1662 	struct ceph_file_info *fi = file->private_data;
1663 	struct inode *inode = file_inode(file);
1664 	struct ceph_inode_info *ci = ceph_inode(inode);
1665 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1666 	struct ceph_osd_client *osdc = &fsc->client->osdc;
1667 	struct ceph_cap_flush *prealloc_cf;
1668 	ssize_t count, written = 0;
1669 	int err, want, got;
1670 	bool direct_lock = false;
1671 	u32 map_flags;
1672 	u64 pool_flags;
1673 	loff_t pos;
1674 	loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1675 
1676 	if (ceph_snap(inode) != CEPH_NOSNAP)
1677 		return -EROFS;
1678 
1679 	prealloc_cf = ceph_alloc_cap_flush();
1680 	if (!prealloc_cf)
1681 		return -ENOMEM;
1682 
1683 	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1684 		direct_lock = true;
1685 
1686 retry_snap:
1687 	if (direct_lock)
1688 		ceph_start_io_direct(inode);
1689 	else
1690 		ceph_start_io_write(inode);
1691 
1692 	/* We can write back this queue in page reclaim */
1693 	current->backing_dev_info = inode_to_bdi(inode);
1694 
1695 	if (iocb->ki_flags & IOCB_APPEND) {
1696 		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1697 		if (err < 0)
1698 			goto out;
1699 	}
1700 
1701 	err = generic_write_checks(iocb, from);
1702 	if (err <= 0)
1703 		goto out;
1704 
1705 	pos = iocb->ki_pos;
1706 	if (unlikely(pos >= limit)) {
1707 		err = -EFBIG;
1708 		goto out;
1709 	} else {
1710 		iov_iter_truncate(from, limit - pos);
1711 	}
1712 
1713 	count = iov_iter_count(from);
1714 	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1715 		err = -EDQUOT;
1716 		goto out;
1717 	}
1718 
1719 	err = file_remove_privs(file);
1720 	if (err)
1721 		goto out;
1722 
1723 	err = file_update_time(file);
1724 	if (err)
1725 		goto out;
1726 
1727 	inode_inc_iversion_raw(inode);
1728 
1729 	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1730 		err = ceph_uninline_data(file, NULL);
1731 		if (err < 0)
1732 			goto out;
1733 	}
1734 
1735 	down_read(&osdc->lock);
1736 	map_flags = osdc->osdmap->flags;
1737 	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1738 	up_read(&osdc->lock);
1739 	if ((map_flags & CEPH_OSDMAP_FULL) ||
1740 	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
1741 		err = -ENOSPC;
1742 		goto out;
1743 	}
1744 
1745 	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1746 	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1747 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1748 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1749 	else
1750 		want = CEPH_CAP_FILE_BUFFER;
1751 	got = 0;
1752 	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
1753 			    &got, NULL);
1754 	if (err < 0)
1755 		goto out;
1756 
1757 	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1758 	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1759 
1760 	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1761 	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1762 	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1763 		struct ceph_snap_context *snapc;
1764 		struct iov_iter data;
1765 
1766 		spin_lock(&ci->i_ceph_lock);
1767 		if (__ceph_have_pending_cap_snap(ci)) {
1768 			struct ceph_cap_snap *capsnap =
1769 					list_last_entry(&ci->i_cap_snaps,
1770 							struct ceph_cap_snap,
1771 							ci_item);
1772 			snapc = ceph_get_snap_context(capsnap->context);
1773 		} else {
1774 			BUG_ON(!ci->i_head_snapc);
1775 			snapc = ceph_get_snap_context(ci->i_head_snapc);
1776 		}
1777 		spin_unlock(&ci->i_ceph_lock);
1778 
1779 		/* we might need to revert back to that point */
1780 		data = *from;
1781 		if (iocb->ki_flags & IOCB_DIRECT)
1782 			written = ceph_direct_read_write(iocb, &data, snapc,
1783 							 &prealloc_cf);
1784 		else
1785 			written = ceph_sync_write(iocb, &data, pos, snapc);
1786 		if (direct_lock)
1787 			ceph_end_io_direct(inode);
1788 		else
1789 			ceph_end_io_write(inode);
1790 		if (written > 0)
1791 			iov_iter_advance(from, written);
1792 		ceph_put_snap_context(snapc);
1793 	} else {
1794 		/*
1795 		 * No need to acquire the i_truncate_mutex. Because
1796 		 * the MDS revokes Fwb caps before sending truncate
1797 		 * message to us. We can't get Fwb cap while there
1798 		 * are pending vmtruncate. So write and vmtruncate
1799 		 * can not run at the same time
1800 		 */
1801 		written = generic_perform_write(file, from, pos);
1802 		if (likely(written >= 0))
1803 			iocb->ki_pos = pos + written;
1804 		ceph_end_io_write(inode);
1805 	}
1806 
1807 	if (written >= 0) {
1808 		int dirty;
1809 
1810 		spin_lock(&ci->i_ceph_lock);
1811 		ci->i_inline_version = CEPH_INLINE_NONE;
1812 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1813 					       &prealloc_cf);
1814 		spin_unlock(&ci->i_ceph_lock);
1815 		if (dirty)
1816 			__mark_inode_dirty(inode, dirty);
1817 		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1818 			ceph_check_caps(ci, 0, NULL);
1819 	}
1820 
1821 	dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
1822 	     inode, ceph_vinop(inode), pos, (unsigned)count,
1823 	     ceph_cap_string(got));
1824 	ceph_put_cap_refs(ci, got);
1825 
1826 	if (written == -EOLDSNAPC) {
1827 		dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
1828 		     inode, ceph_vinop(inode), pos, (unsigned)count);
1829 		goto retry_snap;
1830 	}
1831 
1832 	if (written >= 0) {
1833 		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1834 		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1835 			iocb->ki_flags |= IOCB_DSYNC;
1836 		written = generic_write_sync(iocb, written);
1837 	}
1838 
1839 	goto out_unlocked;
1840 out:
1841 	if (direct_lock)
1842 		ceph_end_io_direct(inode);
1843 	else
1844 		ceph_end_io_write(inode);
1845 out_unlocked:
1846 	ceph_free_cap_flush(prealloc_cf);
1847 	current->backing_dev_info = NULL;
1848 	return written ? written : err;
1849 }
1850 
1851 /*
1852  * llseek.  be sure to verify file size on SEEK_END.
1853  */
1854 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1855 {
1856 	struct inode *inode = file->f_mapping->host;
1857 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1858 	loff_t i_size;
1859 	loff_t ret;
1860 
1861 	inode_lock(inode);
1862 
1863 	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1864 		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1865 		if (ret < 0)
1866 			goto out;
1867 	}
1868 
1869 	i_size = i_size_read(inode);
1870 	switch (whence) {
1871 	case SEEK_END:
1872 		offset += i_size;
1873 		break;
1874 	case SEEK_CUR:
1875 		/*
1876 		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1877 		 * position-querying operation.  Avoid rewriting the "same"
1878 		 * f_pos value back to the file because a concurrent read(),
1879 		 * write() or lseek() might have altered it
1880 		 */
1881 		if (offset == 0) {
1882 			ret = file->f_pos;
1883 			goto out;
1884 		}
1885 		offset += file->f_pos;
1886 		break;
1887 	case SEEK_DATA:
1888 		if (offset < 0 || offset >= i_size) {
1889 			ret = -ENXIO;
1890 			goto out;
1891 		}
1892 		break;
1893 	case SEEK_HOLE:
1894 		if (offset < 0 || offset >= i_size) {
1895 			ret = -ENXIO;
1896 			goto out;
1897 		}
1898 		offset = i_size;
1899 		break;
1900 	}
1901 
1902 	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1903 
1904 out:
1905 	inode_unlock(inode);
1906 	return ret;
1907 }
1908 
1909 static inline void ceph_zero_partial_page(
1910 	struct inode *inode, loff_t offset, unsigned size)
1911 {
1912 	struct page *page;
1913 	pgoff_t index = offset >> PAGE_SHIFT;
1914 
1915 	page = find_lock_page(inode->i_mapping, index);
1916 	if (page) {
1917 		wait_on_page_writeback(page);
1918 		zero_user(page, offset & (PAGE_SIZE - 1), size);
1919 		unlock_page(page);
1920 		put_page(page);
1921 	}
1922 }
1923 
1924 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1925 				      loff_t length)
1926 {
1927 	loff_t nearly = round_up(offset, PAGE_SIZE);
1928 	if (offset < nearly) {
1929 		loff_t size = nearly - offset;
1930 		if (length < size)
1931 			size = length;
1932 		ceph_zero_partial_page(inode, offset, size);
1933 		offset += size;
1934 		length -= size;
1935 	}
1936 	if (length >= PAGE_SIZE) {
1937 		loff_t size = round_down(length, PAGE_SIZE);
1938 		truncate_pagecache_range(inode, offset, offset + size - 1);
1939 		offset += size;
1940 		length -= size;
1941 	}
1942 	if (length)
1943 		ceph_zero_partial_page(inode, offset, length);
1944 }
1945 
1946 static int ceph_zero_partial_object(struct inode *inode,
1947 				    loff_t offset, loff_t *length)
1948 {
1949 	struct ceph_inode_info *ci = ceph_inode(inode);
1950 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1951 	struct ceph_osd_request *req;
1952 	int ret = 0;
1953 	loff_t zero = 0;
1954 	int op;
1955 
1956 	if (!length) {
1957 		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1958 		length = &zero;
1959 	} else {
1960 		op = CEPH_OSD_OP_ZERO;
1961 	}
1962 
1963 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1964 					ceph_vino(inode),
1965 					offset, length,
1966 					0, 1, op,
1967 					CEPH_OSD_FLAG_WRITE,
1968 					NULL, 0, 0, false);
1969 	if (IS_ERR(req)) {
1970 		ret = PTR_ERR(req);
1971 		goto out;
1972 	}
1973 
1974 	req->r_mtime = inode->i_mtime;
1975 	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1976 	if (!ret) {
1977 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1978 		if (ret == -ENOENT)
1979 			ret = 0;
1980 	}
1981 	ceph_osdc_put_request(req);
1982 
1983 out:
1984 	return ret;
1985 }
1986 
1987 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1988 {
1989 	int ret = 0;
1990 	struct ceph_inode_info *ci = ceph_inode(inode);
1991 	s32 stripe_unit = ci->i_layout.stripe_unit;
1992 	s32 stripe_count = ci->i_layout.stripe_count;
1993 	s32 object_size = ci->i_layout.object_size;
1994 	u64 object_set_size = object_size * stripe_count;
1995 	u64 nearly, t;
1996 
1997 	/* round offset up to next period boundary */
1998 	nearly = offset + object_set_size - 1;
1999 	t = nearly;
2000 	nearly -= do_div(t, object_set_size);
2001 
2002 	while (length && offset < nearly) {
2003 		loff_t size = length;
2004 		ret = ceph_zero_partial_object(inode, offset, &size);
2005 		if (ret < 0)
2006 			return ret;
2007 		offset += size;
2008 		length -= size;
2009 	}
2010 	while (length >= object_set_size) {
2011 		int i;
2012 		loff_t pos = offset;
2013 		for (i = 0; i < stripe_count; ++i) {
2014 			ret = ceph_zero_partial_object(inode, pos, NULL);
2015 			if (ret < 0)
2016 				return ret;
2017 			pos += stripe_unit;
2018 		}
2019 		offset += object_set_size;
2020 		length -= object_set_size;
2021 	}
2022 	while (length) {
2023 		loff_t size = length;
2024 		ret = ceph_zero_partial_object(inode, offset, &size);
2025 		if (ret < 0)
2026 			return ret;
2027 		offset += size;
2028 		length -= size;
2029 	}
2030 	return ret;
2031 }
2032 
2033 static long ceph_fallocate(struct file *file, int mode,
2034 				loff_t offset, loff_t length)
2035 {
2036 	struct ceph_file_info *fi = file->private_data;
2037 	struct inode *inode = file_inode(file);
2038 	struct ceph_inode_info *ci = ceph_inode(inode);
2039 	struct ceph_cap_flush *prealloc_cf;
2040 	int want, got = 0;
2041 	int dirty;
2042 	int ret = 0;
2043 	loff_t endoff = 0;
2044 	loff_t size;
2045 
2046 	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2047 		return -EOPNOTSUPP;
2048 
2049 	if (!S_ISREG(inode->i_mode))
2050 		return -EOPNOTSUPP;
2051 
2052 	prealloc_cf = ceph_alloc_cap_flush();
2053 	if (!prealloc_cf)
2054 		return -ENOMEM;
2055 
2056 	inode_lock(inode);
2057 
2058 	if (ceph_snap(inode) != CEPH_NOSNAP) {
2059 		ret = -EROFS;
2060 		goto unlock;
2061 	}
2062 
2063 	if (ci->i_inline_version != CEPH_INLINE_NONE) {
2064 		ret = ceph_uninline_data(file, NULL);
2065 		if (ret < 0)
2066 			goto unlock;
2067 	}
2068 
2069 	size = i_size_read(inode);
2070 
2071 	/* Are we punching a hole beyond EOF? */
2072 	if (offset >= size)
2073 		goto unlock;
2074 	if ((offset + length) > size)
2075 		length = size - offset;
2076 
2077 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
2078 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2079 	else
2080 		want = CEPH_CAP_FILE_BUFFER;
2081 
2082 	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
2083 	if (ret < 0)
2084 		goto unlock;
2085 
2086 	ceph_zero_pagecache_range(inode, offset, length);
2087 	ret = ceph_zero_objects(inode, offset, length);
2088 
2089 	if (!ret) {
2090 		spin_lock(&ci->i_ceph_lock);
2091 		ci->i_inline_version = CEPH_INLINE_NONE;
2092 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2093 					       &prealloc_cf);
2094 		spin_unlock(&ci->i_ceph_lock);
2095 		if (dirty)
2096 			__mark_inode_dirty(inode, dirty);
2097 	}
2098 
2099 	ceph_put_cap_refs(ci, got);
2100 unlock:
2101 	inode_unlock(inode);
2102 	ceph_free_cap_flush(prealloc_cf);
2103 	return ret;
2104 }
2105 
2106 /*
2107  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2108  * src_ci.  Two attempts are made to obtain both caps, and an error is return if
2109  * this fails; zero is returned on success.
2110  */
2111 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2112 			  struct file *dst_filp,
2113 			  loff_t dst_endoff, int *dst_got)
2114 {
2115 	int ret = 0;
2116 	bool retrying = false;
2117 
2118 retry_caps:
2119 	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2120 			    dst_endoff, dst_got, NULL);
2121 	if (ret < 0)
2122 		return ret;
2123 
2124 	/*
2125 	 * Since we're already holding the FILE_WR capability for the dst file,
2126 	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2127 	 * retry dance instead to try to get both capabilities.
2128 	 */
2129 	ret = ceph_try_get_caps(file_inode(src_filp),
2130 				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2131 				false, src_got);
2132 	if (ret <= 0) {
2133 		/* Start by dropping dst_ci caps and getting src_ci caps */
2134 		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2135 		if (retrying) {
2136 			if (!ret)
2137 				/* ceph_try_get_caps masks EAGAIN */
2138 				ret = -EAGAIN;
2139 			return ret;
2140 		}
2141 		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2142 				    CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
2143 		if (ret < 0)
2144 			return ret;
2145 		/*... drop src_ci caps too, and retry */
2146 		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2147 		retrying = true;
2148 		goto retry_caps;
2149 	}
2150 	return ret;
2151 }
2152 
2153 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2154 			   struct ceph_inode_info *dst_ci, int dst_got)
2155 {
2156 	ceph_put_cap_refs(src_ci, src_got);
2157 	ceph_put_cap_refs(dst_ci, dst_got);
2158 }
2159 
2160 /*
2161  * This function does several size-related checks, returning an error if:
2162  *  - source file is smaller than off+len
2163  *  - destination file size is not OK (inode_newsize_ok())
2164  *  - max bytes quotas is exceeded
2165  */
2166 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2167 			   loff_t src_off, loff_t dst_off, size_t len)
2168 {
2169 	loff_t size, endoff;
2170 
2171 	size = i_size_read(src_inode);
2172 	/*
2173 	 * Don't copy beyond source file EOF.  Instead of simply setting length
2174 	 * to (size - src_off), just drop to VFS default implementation, as the
2175 	 * local i_size may be stale due to other clients writing to the source
2176 	 * inode.
2177 	 */
2178 	if (src_off + len > size) {
2179 		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2180 		     src_off, len, size);
2181 		return -EOPNOTSUPP;
2182 	}
2183 	size = i_size_read(dst_inode);
2184 
2185 	endoff = dst_off + len;
2186 	if (inode_newsize_ok(dst_inode, endoff))
2187 		return -EOPNOTSUPP;
2188 
2189 	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2190 		return -EDQUOT;
2191 
2192 	return 0;
2193 }
2194 
2195 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2196 				    struct ceph_inode_info *dst_ci, u64 *dst_off,
2197 				    struct ceph_fs_client *fsc,
2198 				    size_t len, unsigned int flags)
2199 {
2200 	struct ceph_object_locator src_oloc, dst_oloc;
2201 	struct ceph_object_id src_oid, dst_oid;
2202 	size_t bytes = 0;
2203 	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2204 	u32 src_objlen, dst_objlen;
2205 	u32 object_size = src_ci->i_layout.object_size;
2206 	int ret;
2207 
2208 	src_oloc.pool = src_ci->i_layout.pool_id;
2209 	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2210 	dst_oloc.pool = dst_ci->i_layout.pool_id;
2211 	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2212 
2213 	while (len >= object_size) {
2214 		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2215 					      object_size, &src_objnum,
2216 					      &src_objoff, &src_objlen);
2217 		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2218 					      object_size, &dst_objnum,
2219 					      &dst_objoff, &dst_objlen);
2220 		ceph_oid_init(&src_oid);
2221 		ceph_oid_printf(&src_oid, "%llx.%08llx",
2222 				src_ci->i_vino.ino, src_objnum);
2223 		ceph_oid_init(&dst_oid);
2224 		ceph_oid_printf(&dst_oid, "%llx.%08llx",
2225 				dst_ci->i_vino.ino, dst_objnum);
2226 		/* Do an object remote copy */
2227 		ret = ceph_osdc_copy_from(&fsc->client->osdc,
2228 					  src_ci->i_vino.snap, 0,
2229 					  &src_oid, &src_oloc,
2230 					  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2231 					  CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2232 					  &dst_oid, &dst_oloc,
2233 					  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2234 					  CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
2235 					  dst_ci->i_truncate_seq,
2236 					  dst_ci->i_truncate_size,
2237 					  CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2238 		if (ret) {
2239 			if (ret == -EOPNOTSUPP) {
2240 				fsc->have_copy_from2 = false;
2241 				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2242 			}
2243 			dout("ceph_osdc_copy_from returned %d\n", ret);
2244 			if (!bytes)
2245 				bytes = ret;
2246 			goto out;
2247 		}
2248 		len -= object_size;
2249 		bytes += object_size;
2250 		*src_off += object_size;
2251 		*dst_off += object_size;
2252 	}
2253 
2254 out:
2255 	ceph_oloc_destroy(&src_oloc);
2256 	ceph_oloc_destroy(&dst_oloc);
2257 	return bytes;
2258 }
2259 
2260 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2261 				      struct file *dst_file, loff_t dst_off,
2262 				      size_t len, unsigned int flags)
2263 {
2264 	struct inode *src_inode = file_inode(src_file);
2265 	struct inode *dst_inode = file_inode(dst_file);
2266 	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2267 	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2268 	struct ceph_cap_flush *prealloc_cf;
2269 	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2270 	loff_t size;
2271 	ssize_t ret = -EIO, bytes;
2272 	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2273 	u32 src_objlen, dst_objlen;
2274 	int src_got = 0, dst_got = 0, err, dirty;
2275 
2276 	if (src_inode->i_sb != dst_inode->i_sb) {
2277 		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2278 
2279 		if (ceph_fsid_compare(&src_fsc->client->fsid,
2280 				      &dst_fsc->client->fsid)) {
2281 			dout("Copying files across clusters: src: %pU dst: %pU\n",
2282 			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
2283 			return -EXDEV;
2284 		}
2285 	}
2286 	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2287 		return -EROFS;
2288 
2289 	/*
2290 	 * Some of the checks below will return -EOPNOTSUPP, which will force a
2291 	 * fallback to the default VFS copy_file_range implementation.  This is
2292 	 * desirable in several cases (for ex, the 'len' is smaller than the
2293 	 * size of the objects, or in cases where that would be more
2294 	 * efficient).
2295 	 */
2296 
2297 	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2298 		return -EOPNOTSUPP;
2299 
2300 	if (!src_fsc->have_copy_from2)
2301 		return -EOPNOTSUPP;
2302 
2303 	/*
2304 	 * Striped file layouts require that we copy partial objects, but the
2305 	 * OSD copy-from operation only supports full-object copies.  Limit
2306 	 * this to non-striped file layouts for now.
2307 	 */
2308 	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2309 	    (src_ci->i_layout.stripe_count != 1) ||
2310 	    (dst_ci->i_layout.stripe_count != 1) ||
2311 	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2312 		dout("Invalid src/dst files layout\n");
2313 		return -EOPNOTSUPP;
2314 	}
2315 
2316 	if (len < src_ci->i_layout.object_size)
2317 		return -EOPNOTSUPP; /* no remote copy will be done */
2318 
2319 	prealloc_cf = ceph_alloc_cap_flush();
2320 	if (!prealloc_cf)
2321 		return -ENOMEM;
2322 
2323 	/* Start by sync'ing the source and destination files */
2324 	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2325 	if (ret < 0) {
2326 		dout("failed to write src file (%zd)\n", ret);
2327 		goto out;
2328 	}
2329 	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2330 	if (ret < 0) {
2331 		dout("failed to write dst file (%zd)\n", ret);
2332 		goto out;
2333 	}
2334 
2335 	/*
2336 	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2337 	 * clients may have dirty data in their caches.  And OSDs know nothing
2338 	 * about caps, so they can't safely do the remote object copies.
2339 	 */
2340 	err = get_rd_wr_caps(src_file, &src_got,
2341 			     dst_file, (dst_off + len), &dst_got);
2342 	if (err < 0) {
2343 		dout("get_rd_wr_caps returned %d\n", err);
2344 		ret = -EOPNOTSUPP;
2345 		goto out;
2346 	}
2347 
2348 	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2349 	if (ret < 0)
2350 		goto out_caps;
2351 
2352 	/* Drop dst file cached pages */
2353 	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2354 					    dst_off >> PAGE_SHIFT,
2355 					    (dst_off + len) >> PAGE_SHIFT);
2356 	if (ret < 0) {
2357 		dout("Failed to invalidate inode pages (%zd)\n", ret);
2358 		ret = 0; /* XXX */
2359 	}
2360 	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2361 				      src_ci->i_layout.object_size,
2362 				      &src_objnum, &src_objoff, &src_objlen);
2363 	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2364 				      dst_ci->i_layout.object_size,
2365 				      &dst_objnum, &dst_objoff, &dst_objlen);
2366 	/* object-level offsets need to the same */
2367 	if (src_objoff != dst_objoff) {
2368 		ret = -EOPNOTSUPP;
2369 		goto out_caps;
2370 	}
2371 
2372 	/*
2373 	 * Do a manual copy if the object offset isn't object aligned.
2374 	 * 'src_objlen' contains the bytes left until the end of the object,
2375 	 * starting at the src_off
2376 	 */
2377 	if (src_objoff) {
2378 		dout("Initial partial copy of %u bytes\n", src_objlen);
2379 
2380 		/*
2381 		 * we need to temporarily drop all caps as we'll be calling
2382 		 * {read,write}_iter, which will get caps again.
2383 		 */
2384 		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2385 		ret = do_splice_direct(src_file, &src_off, dst_file,
2386 				       &dst_off, src_objlen, flags);
2387 		/* Abort on short copies or on error */
2388 		if (ret < src_objlen) {
2389 			dout("Failed partial copy (%zd)\n", ret);
2390 			goto out;
2391 		}
2392 		len -= ret;
2393 		err = get_rd_wr_caps(src_file, &src_got,
2394 				     dst_file, (dst_off + len), &dst_got);
2395 		if (err < 0)
2396 			goto out;
2397 		err = is_file_size_ok(src_inode, dst_inode,
2398 				      src_off, dst_off, len);
2399 		if (err < 0)
2400 			goto out_caps;
2401 	}
2402 
2403 	size = i_size_read(dst_inode);
2404 	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2405 				     src_fsc, len, flags);
2406 	if (bytes <= 0) {
2407 		if (!ret)
2408 			ret = bytes;
2409 		goto out_caps;
2410 	}
2411 	dout("Copied %zu bytes out of %zu\n", bytes, len);
2412 	len -= bytes;
2413 	ret += bytes;
2414 
2415 	file_update_time(dst_file);
2416 	inode_inc_iversion_raw(dst_inode);
2417 
2418 	if (dst_off > size) {
2419 		/* Let the MDS know about dst file size change */
2420 		if (ceph_inode_set_size(dst_inode, dst_off) ||
2421 		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2422 			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
2423 	}
2424 	/* Mark Fw dirty */
2425 	spin_lock(&dst_ci->i_ceph_lock);
2426 	dst_ci->i_inline_version = CEPH_INLINE_NONE;
2427 	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2428 	spin_unlock(&dst_ci->i_ceph_lock);
2429 	if (dirty)
2430 		__mark_inode_dirty(dst_inode, dirty);
2431 
2432 out_caps:
2433 	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2434 
2435 	/*
2436 	 * Do the final manual copy if we still have some bytes left, unless
2437 	 * there were errors in remote object copies (len >= object_size).
2438 	 */
2439 	if (len && (len < src_ci->i_layout.object_size)) {
2440 		dout("Final partial copy of %zu bytes\n", len);
2441 		bytes = do_splice_direct(src_file, &src_off, dst_file,
2442 					 &dst_off, len, flags);
2443 		if (bytes > 0)
2444 			ret += bytes;
2445 		else
2446 			dout("Failed partial copy (%zd)\n", bytes);
2447 	}
2448 
2449 out:
2450 	ceph_free_cap_flush(prealloc_cf);
2451 
2452 	return ret;
2453 }
2454 
2455 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2456 				    struct file *dst_file, loff_t dst_off,
2457 				    size_t len, unsigned int flags)
2458 {
2459 	ssize_t ret;
2460 
2461 	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2462 				     len, flags);
2463 
2464 	if (ret == -EOPNOTSUPP || ret == -EXDEV)
2465 		ret = generic_copy_file_range(src_file, src_off, dst_file,
2466 					      dst_off, len, flags);
2467 	return ret;
2468 }
2469 
2470 const struct file_operations ceph_file_fops = {
2471 	.open = ceph_open,
2472 	.release = ceph_release,
2473 	.llseek = ceph_llseek,
2474 	.read_iter = ceph_read_iter,
2475 	.write_iter = ceph_write_iter,
2476 	.mmap = ceph_mmap,
2477 	.fsync = ceph_fsync,
2478 	.lock = ceph_lock,
2479 	.flock = ceph_flock,
2480 	.splice_read = generic_file_splice_read,
2481 	.splice_write = iter_file_splice_write,
2482 	.unlocked_ioctl = ceph_ioctl,
2483 	.compat_ioctl = compat_ptr_ioctl,
2484 	.fallocate	= ceph_fallocate,
2485 	.copy_file_range = ceph_copy_file_range,
2486 };
2487