/* linux/fs/ceph/file.c (revision d89dffa976bcd13fd87eb76e02e3b71c3a7868e3) */
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
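/*
 * Within this file those modes map roughly as follows: ceph_aio_read()
 * and ceph_aio_write() take the buffered path via
 * generic_file_aio_{read,write} when we hold the needed caps, and fall
 * through to ceph_sync_read()/ceph_sync_write() (which also handle
 * O_DIRECT) otherwise.
 */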


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

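	/*
	 * Writes, creates, and truncates must go to the authoritative
	 * MDS for the inode; reads can be handled by any MDS that has
	 * a replica.
	 */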
	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
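		/*
		 * readdir offsets 0 and 1 are reserved for "." and "..";
		 * real entries start at 2.
		 */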
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = NULL;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir (the virtual .snap dir the client synthesizes) */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	if (flags & (O_CREAT|O_TRUNC))
		parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
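/*
 * (This is wired up from the VFS atomic_open path; see ceph_atomic_open
 * in dir.c.)
 */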
int ceph_lookup_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *ret;
	int err;

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out;
	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (err)
		goto out;
	err = finish_open(file, req->r_dentry, ceph_open, opened);
out:
	ret = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", ret);

	if (IS_ERR(ret))
		return PTR_ERR(ret);

	dput(ret);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
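/*
 * For example, with the default 4 MB object size, a read of 6 MB at
 * offset 3 MB spans two objects: the first pass can return at most
 * 1 MB (hit_stripe), so we loop and issue another read for the rest.
 */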
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int io_align, page_align;
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			ceph_zero_page_vector_range(page_align + read,
						    pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = 1;

		/* zero trailing bytes (inside i_size) */
		if (left > 0 && pos < inode->i_size) {
			if (pos + left > inode->i_size)
				left = inode->i_size - pos;

			dout("zero tail %d\n", left);
			ceph_zero_page_vector_range(page_align + read, left,
						    pages);
			read += left;
		}
	}

	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages, ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		num_pages = calc_pages_for((unsigned long)data, len);
		pages = ceph_get_direct_page_vector(data, num_pages, true);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof,
			   file->f_flags & O_DIRECT,
			   (unsigned long)data & ~PAGE_MASK);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, true);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
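/*
 * While it waits for that commit, the request holds a CEPH_CAP_FILE_WR
 * reference and sits on the inode's i_unsafe_writes list; this callback
 * drops both once the OSD reports the write is safe on disk.
 */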
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	unsigned long long pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int page_align, io_align;
	unsigned long buf_align;
	int ret;
	struct timespec mtime = CURRENT_TIME;

	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;
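
	/*
	 * With ACK set the OSD replies once the write is in memory,
	 * and again (ONDISK) once it commits; sync_write_commit handles
	 * that second reply.  For O_SYNC/O_DIRECT we skip the early ACK
	 * and simply wait for the commit (do_sync).
	 */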

	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	io_align = pos & ~PAGE_MASK;
	buf_align = (unsigned long)data & ~PAGE_MASK;
	len = left;
	if (file->f_flags & O_DIRECT) {
		/* write from beginning of first page, regardless of
		   io alignment */
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
		num_pages = calc_pages_for((unsigned long)data, len);
	} else {
		page_align = pos & ~PAGE_MASK;
		num_pages = calc_pages_for(pos, len);
	}
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2, page_align);
	if (!req)
		return -ENOMEM;

	if (file->f_flags & O_DIRECT) {
		pages = ceph_get_direct_page_vector(data, num_pages, false);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range. this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/*
			 * We will return to the caller on the first (ACK)
			 * reply; register a second callback for the final
			 * ONDISK commit and let the request own the pages.
			 */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_item,
				      &ci->i_unsafe_writes);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}

		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret < 0 && req->r_safe_callback) {
			spin_lock(&ci->i_unsafe_lock);
			list_del_init(&req->r_unsafe_item);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
	}

	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, false);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		data += len;
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
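/*
 * CEPH_CAP_FILE_CACHE is what permits serving reads from the page
 * cache; if the MDS hasn't issued it (or O_DIRECT/sync semantics were
 * requested) we fall back to ceph_sync_read() below.
 */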
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void __user *base = iov->iov_base;
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	    (fi->flags & CEPH_F_SYNC))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n",
			     *ppos, inode->i_size);
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
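/*
 * Similarly, CEPH_CAP_FILE_BUFFER is what permits buffering dirty data
 * in the page cache; without it every write takes the ceph_sync_write()
 * path.
 */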
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
		       unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	loff_t endoff = pos + iov->iov_len;
	int want, got = 0;
	int ret, err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto out_put;

	dout("aio_write %p %llx.%llx %llu~%u  got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	    (fi->flags & CEPH_F_SYNC)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
			&iocb->ki_pos);
	} else {
		/*
		 * buffered write; drop Fw early to avoid slow
		 * revocation if we get stuck on balance_dirty_pages
		 */
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		ceph_put_cap_refs(ci, got);

		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}

		if (dirty)
			__mark_inode_dirty(inode, dirty);
		goto out;
	}

	if (ret >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

out_put:
	dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

out:
	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);

	if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	switch (origin) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
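	/*
	 * We have no per-extent allocation data here, so treat the
	 * whole file as one data extent: SEEK_DATA succeeds at any
	 * offset inside the file and SEEK_HOLE lands at i_size.
	 */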
	case SEEK_DATA:
		if (offset >= inode->i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= inode->i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}

	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

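/*
 * Note that .read and .write point at do_sync_read/do_sync_write,
 * the generic wrappers that call the aio_read/aio_write methods, so
 * all file I/O funnels through ceph_aio_read/ceph_aio_write above.
 */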
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
};