/* fs/ceph/file.c (revision 80d443e8876602be2c130f79c4de81e12e2a700d) */
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagev_size len = %zu\n", size);
	return size;
}
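
/*
 * A worked example of the coalescing rule above, assuming 4 KiB pages
 * (all addresses below are illustrative, not from real code):
 *
 *   iov[0] = { .iov_base = (void *)0x1000, .iov_len = 0x3000 }  tail 0x4000
 *   iov[1] = { .iov_base = (void *)0x8000, .iov_len = 0x1000 }  base aligned
 *   iov[2] = { .iov_base = (void *)0x9800, .iov_len = 0x0800 }  base unaligned
 *
 * Starting from iov[0] (tail page aligned), the loop pulls in iov[1]
 * (base page aligned) and stops before iov[2], returning 0x4000 bytes
 * that can be serviced by a single page vector.
 */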

/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages) {
		pages = vmalloc(sizeof(*pages) * npages);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;

		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	BUG_ON(nbytes != 0);
	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}
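
/*
 * A sketch of the pinning arithmetic above, with illustrative numbers
 * (4 KiB pages): if iov_iter_get_pages() pins ret = 6000 bytes with
 * start = 0x800 (the offset into the first page), then
 * (6000 + 0x800 + PAGE_SIZE - 1) / PAGE_SIZE = 2, so idx advances by
 * two pages and the next iteration pins the remaining bytes.  On
 * failure only the idx pages pinned so far are released, which is why
 * idx (not npages) is passed to ceph_put_page_vector().
 */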

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
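
/*
 * Read-only opens may be served by any MDS holding a replica, but
 * writes and namespace mutations must go through the authoritative
 * MDS for the inode; that is why any O_WRONLY/O_RDWR/O_CREAT/O_TRUNC
 * open is routed with USE_AUTH_MDS above.
 */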

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		/* fall through: regular files and dirs share the setup below */
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    ((wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
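
/*
 * An example of the fast path above: a client that already holds read
 * caps on the inode (say from an earlier open) and opens it O_RDONLY
 * takes the __ceph_is_any_real_caps() branch, bumps the fmode count
 * under i_ceph_lock, and returns with no MDS round trip at all; only
 * when neither the issued nor the mds_wanted set already covers the
 * new wanted set is ceph_check_caps() called to (asynchronously)
 * update the MDS.
 */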


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,	/* a read retry has already been attempted */
	CHECK_EOF = 2,		/* short read; recheck i_size, may read more */
	READ_INLINE = 3,	/* data is inline at the MDS; fetch via getattr */
};

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we should
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit a stripe boundary and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
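
/*
 * An illustration of why a single read can span objects: with a layout
 * of 4 MiB objects and stripe_count == 1 (illustrative values), a
 * 1 MiB read at offset 3.5 MiB covers the last 0.5 MiB of one object
 * and the first 0.5 MiB of the next.  ceph_osdc_readpages() clamps
 * this_len to what fits in the first object, so this_len < len
 * (hit_stripe) and the "goto more" above issues a second read for the
 * remainder.
 */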

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}
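
/*
 * Note the two staging strategies above: for ITER_PIPE destinations
 * the pages grabbed by iov_iter_get_pages_alloc() are handed straight
 * to the OSD read, so no extra copy is needed; for ordinary iterators
 * a temporary page vector is filled by striped_read() and then
 * drained with copy_page_to_iter(), one partial-page chunk at a time.
 */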

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	int write;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If the read was satisfied by a single OSD request,
			 * it may extend past EOF.  Otherwise the read is
			 * within i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags =	CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];
	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);

		complete_all(&req->r_completion);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}

/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
void ceph_sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	if (!S_ISREG(inode->i_mode))
		return;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */

	req = list_last_entry(head, struct ceph_osd_request,
			      r_unsafe_item);
	last_tid = req->r_tid;

	do {
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_done_completion);
		ceph_osdc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_first_entry(head, struct ceph_osd_request,
				       r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}
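
/*
 * An example of the bounded wait above: with unsafe writes for tids
 * {5, 9, 12} on the list, the function first waits on tid 12 and
 * records last_tid = 12, then repeatedly waits on whatever sits at
 * the head of the list.  Writes issued while we sleep get tids > 12
 * and terminate the loop, so a steady stream of new writes cannot
 * extend the wait indefinitely.
 */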

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    /* include a 'startsync' command */
					    write ? 2 : 1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO only when the IO is
		 * within i_size or can be satisfied by a single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   (pos + len) | (PAGE_SIZE - 1));

			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, !write);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
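
/*
 * A note on the flow above: in the AIO case each per-object request is
 * first parked on aio_req->osd_reqs and only submitted after the whole
 * iterator has been sliced up, so a failure while building requests
 * can still be returned synchronously.  Once submission starts, the
 * caller gets -EIOCBQUEUED and the result is delivered via
 * ki_complete() from ceph_aio_complete(); a write that raced with a
 * snapshot comes back with -EOLDSNAPC and is re-driven against the
 * newer snap context by ceph_aio_retry_work().
 */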

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
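
/*
 * Unlike ceph_direct_read_write(), the synchronous path above stages
 * the data through freshly allocated kernel pages (copy_page_from_iter()
 * into a page vector) instead of pinning user pages, which is what
 * allows each object write to start at page offset 0 regardless of the
 * alignment of the user buffer.
 */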

/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		current->journal_info = filp;
		ret = generic_file_read_iter(iocb, to);
		current->journal_info = NULL;
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write, _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

retry_snap:
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u"
			     " got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)count);
			inode_lock(inode);
			goto retry_snap;
		}
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex.  Because
		 * the MDS revokes Fwb caps before sending a truncate
		 * message to us, we can't get the Fwb cap while there
		 * is a pending vmtruncate, so write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;

		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it.
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
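
/*
 * A worked example of the three phases above, assuming 4 KiB pages and
 * illustrative offsets: zeroing offset 0x1800, length 0x3000 zeroes
 * the partial head 0x1800-0x1fff in place, drops the fully covered
 * pages 0x2000-0x3fff via truncate_pagecache_range(), and zeroes the
 * partial tail 0x4000-0x47ff in place, leaving the rest of those edge
 * pages intact.
 */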

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
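
/*
 * A sketch of the period arithmetic above, with illustrative layout
 * values: stripe_count = 2 and object_size = 4 MiB give an
 * object_set_size (one "period") of 8 MiB.  The zero request is split
 * into a leading partial period handled object by object, whole
 * periods where each of the stripe_count objects is deleted or
 * truncated outright (ceph_zero_partial_object() with a NULL length),
 * and a trailing partial period handled object by object again.
 */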

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
	.fallocate	= ceph_fallocate,
};