xref: /linux/fs/ceph/caps.c (revision c145211d1f9e2ef19e7b4c2b943f68366daa97af)
1 #include "ceph_debug.h"
2 
3 #include <linux/fs.h>
4 #include <linux/kernel.h>
5 #include <linux/sched.h>
6 #include <linux/slab.h>
7 #include <linux/vmalloc.h>
8 #include <linux/wait.h>
9 #include <linux/writeback.h>
10 
11 #include "super.h"
12 #include "decode.h"
13 #include "messenger.h"
14 
15 /*
16  * Capability management
17  *
18  * The Ceph metadata servers control client access to inode metadata
19  * and file data by issuing capabilities, granting clients permission
20  * to read and/or write both inode fields and file data to OSDs
21  * (storage nodes).  Each capability consists of a set of bits
22  * indicating which operations are allowed.
23  *
24  * If the client holds a *_SHARED cap, the client has a coherent value
25  * that can be safely read from the cached inode.
26  *
27  * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
28  * client is allowed to change inode attributes (e.g., file size,
29  * mtime), note its dirty state in the ceph_cap, and asynchronously
30  * flush that metadata change to the MDS.
31  *
32  * In the event of a conflicting operation (perhaps by another
33  * client), the MDS will revoke the conflicting client capabilities.
34  *
35  * In order for a client to cache an inode, it must hold a capability
36  * from at least one MDS.  When inodes are released, release
37  * notifications are batched and periodically sent en masse to the MDS
38  * cluster to release server state.
39  */
40 
41 
42 /*
43  * Generate readable cap strings for debugging output.
44  */
45 #define MAX_CAP_STR 20
46 static char cap_str[MAX_CAP_STR][40];
47 static DEFINE_SPINLOCK(cap_str_lock);
48 static int last_cap_str;
49 
50 static char *gcap_string(char *s, int c)
51 {
52 	if (c & CEPH_CAP_GSHARED)
53 		*s++ = 's';
54 	if (c & CEPH_CAP_GEXCL)
55 		*s++ = 'x';
56 	if (c & CEPH_CAP_GCACHE)
57 		*s++ = 'c';
58 	if (c & CEPH_CAP_GRD)
59 		*s++ = 'r';
60 	if (c & CEPH_CAP_GWR)
61 		*s++ = 'w';
62 	if (c & CEPH_CAP_GBUFFER)
63 		*s++ = 'b';
64 	if (c & CEPH_CAP_GLAZYIO)
65 		*s++ = 'l';
66 	return s;
67 }
68 
69 const char *ceph_cap_string(int caps)
70 {
71 	int i;
72 	char *s;
73 	int c;
74 
75 	spin_lock(&cap_str_lock);
76 	i = last_cap_str++;
77 	if (last_cap_str == MAX_CAP_STR)
78 		last_cap_str = 0;
79 	spin_unlock(&cap_str_lock);
80 
81 	s = cap_str[i];
82 
83 	if (caps & CEPH_CAP_PIN)
84 		*s++ = 'p';
85 
86 	c = (caps >> CEPH_CAP_SAUTH) & 3;
87 	if (c) {
88 		*s++ = 'A';
89 		s = gcap_string(s, c);
90 	}
91 
92 	c = (caps >> CEPH_CAP_SLINK) & 3;
93 	if (c) {
94 		*s++ = 'L';
95 		s = gcap_string(s, c);
96 	}
97 
98 	c = (caps >> CEPH_CAP_SXATTR) & 3;
99 	if (c) {
100 		*s++ = 'X';
101 		s = gcap_string(s, c);
102 	}
103 
104 	c = caps >> CEPH_CAP_SFILE;
105 	if (c) {
106 		*s++ = 'F';
107 		s = gcap_string(s, c);
108 	}
109 
110 	if (s == cap_str[i])
111 		*s++ = '-';
112 	*s = 0;
113 	return cap_str[i];
114 }
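
/*
 * A minimal illustration of the encoding decoded above (a sketch,
 * assuming the standard cap bit definitions from ceph_fs.h): cap bits
 * are a per-class group (CEPH_CAP_GSHARED, GEXCL, GCACHE, ...)
 * shifted by a class offset (CEPH_CAP_SAUTH, SLINK, SXATTR, SFILE).
 * For example,
 *
 *	int caps = CEPH_CAP_PIN |
 *		   (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH) |
 *		   ((CEPH_CAP_GSHARED | CEPH_CAP_GCACHE) << CEPH_CAP_SFILE);
 *
 * would render as "pAsFsc" via ceph_cap_string(caps).
 */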
115 
116 /*
117  * Cap reservations
118  *
119  * Maintain a global pool of preallocated struct ceph_cap, referenced
120  * by struct ceph_cap_reservation contexts.  This ensures that we preallocate
121  * memory needed to successfully process an MDS response.  (If an MDS
122  * sends us cap information and we fail to process it, we will have
123  * problems due to the client and MDS being out of sync.)
124  *
125  * Reservations are 'owned' by a ceph_cap_reservation context.
126  */
127 static spinlock_t caps_list_lock;
128 static struct list_head caps_list;  /* unused (reserved or unreserved) */
129 static int caps_total_count;        /* total caps allocated */
130 static int caps_use_count;          /* in use */
131 static int caps_reserve_count;      /* unused, reserved */
132 static int caps_avail_count;        /* unused, unreserved */
133 static int caps_min_count;          /* keep at least this many (unreserved) */
134 
135 void __init ceph_caps_init(void)
136 {
137 	INIT_LIST_HEAD(&caps_list);
138 	spin_lock_init(&caps_list_lock);
139 }
140 
141 void ceph_caps_finalize(void)
142 {
143 	struct ceph_cap *cap;
144 
145 	spin_lock(&caps_list_lock);
146 	while (!list_empty(&caps_list)) {
147 		cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
148 		list_del(&cap->caps_item);
149 		kmem_cache_free(ceph_cap_cachep, cap);
150 	}
151 	caps_total_count = 0;
152 	caps_avail_count = 0;
153 	caps_use_count = 0;
154 	caps_reserve_count = 0;
155 	caps_min_count = 0;
156 	spin_unlock(&caps_list_lock);
157 }
158 
159 void ceph_adjust_min_caps(int delta)
160 {
161 	spin_lock(&caps_list_lock);
162 	caps_min_count += delta;
163 	BUG_ON(caps_min_count < 0);
164 	spin_unlock(&caps_list_lock);
165 }
166 
167 int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
168 {
169 	int i;
170 	struct ceph_cap *cap;
171 	int have;
172 	int alloc = 0;
173 	LIST_HEAD(newcaps);
174 	int ret = 0;
175 
176 	dout("reserve caps ctx=%p need=%d\n", ctx, need);
177 
178 	/* first reserve any caps that are already allocated */
179 	spin_lock(&caps_list_lock);
180 	if (caps_avail_count >= need)
181 		have = need;
182 	else
183 		have = caps_avail_count;
184 	caps_avail_count -= have;
185 	caps_reserve_count += have;
186 	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
187 	       caps_avail_count);
188 	spin_unlock(&caps_list_lock);
189 
190 	for (i = have; i < need; i++) {
191 		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
192 		if (!cap) {
193 			ret = -ENOMEM;
194 			goto out_alloc_count;
195 		}
196 		list_add(&cap->caps_item, &newcaps);
197 		alloc++;
198 	}
199 	BUG_ON(have + alloc != need);
200 
201 	spin_lock(&caps_list_lock);
202 	caps_total_count += alloc;
203 	caps_reserve_count += alloc;
204 	list_splice(&newcaps, &caps_list);
205 
206 	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
207 	       caps_avail_count);
208 	spin_unlock(&caps_list_lock);
209 
210 	ctx->count = need;
211 	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
212 	     ctx, caps_total_count, caps_use_count, caps_reserve_count,
213 	     caps_avail_count);
214 	return 0;
215 
216 out_alloc_count:
217 	/* we didn't manage to reserve as much as we needed */
218 	pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
219 		   ctx, need, have);
220 	return ret;
221 }
222 
223 int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
224 {
225 	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
226 	if (ctx->count) {
227 		spin_lock(&caps_list_lock);
228 		BUG_ON(caps_reserve_count < ctx->count);
229 		caps_reserve_count -= ctx->count;
230 		caps_avail_count += ctx->count;
231 		ctx->count = 0;
232 		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
233 		     caps_total_count, caps_use_count, caps_reserve_count,
234 		     caps_avail_count);
235 		BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
236 		       caps_avail_count);
237 		spin_unlock(&caps_list_lock);
238 	}
239 	return 0;
240 }
241 
242 static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
243 {
244 	struct ceph_cap *cap = NULL;
245 
246 	/* temporary, until we do something about cap import/export */
247 	if (!ctx)
248 		return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
249 
250 	spin_lock(&caps_list_lock);
251 	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
252 	     ctx, ctx->count, caps_total_count, caps_use_count,
253 	     caps_reserve_count, caps_avail_count);
254 	BUG_ON(!ctx->count);
255 	BUG_ON(ctx->count > caps_reserve_count);
256 	BUG_ON(list_empty(&caps_list));
257 
258 	ctx->count--;
259 	caps_reserve_count--;
260 	caps_use_count++;
261 
262 	cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
263 	list_del(&cap->caps_item);
264 
265 	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
266 	       caps_avail_count);
267 	spin_unlock(&caps_list_lock);
268 	return cap;
269 }
270 
271 void ceph_put_cap(struct ceph_cap *cap)
272 {
273 	spin_lock(&caps_list_lock);
274 	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
275 	     cap, caps_total_count, caps_use_count,
276 	     caps_reserve_count, caps_avail_count);
277 	caps_use_count--;
278 	/*
279 	 * Keep some preallocated caps around (caps_min_count), to
280 	 * avoid lots of free/alloc churn.
281 	 */
282 	if (caps_avail_count >= caps_reserve_count + caps_min_count) {
283 		caps_total_count--;
284 		kmem_cache_free(ceph_cap_cachep, cap);
285 	} else {
286 		caps_avail_count++;
287 		list_add(&cap->caps_item, &caps_list);
288 	}
289 
290 	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
291 	       caps_avail_count);
292 	spin_unlock(&caps_list_lock);
293 }
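
/*
 * A sketch of the reservation lifecycle described above (hypothetical
 * caller; real users reserve before preparing an MDS request so that
 * reply processing cannot fail on allocation):
 *
 *	struct ceph_cap_reservation rsv = { 0 };
 *	struct ceph_cap *cap;
 *
 *	if (ceph_reserve_caps(&rsv, expected_caps) < 0)
 *		return -ENOMEM;            before touching any state
 *	...
 *	cap = get_cap(&rsv);               cannot fail; memory is reserved
 *	...
 *	ceph_unreserve_caps(&rsv);         return unused caps to the pool
 */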
294 
295 void ceph_reservation_status(struct ceph_client *client,
296 			     int *total, int *avail, int *used, int *reserved,
297 			     int *min)
298 {
299 	if (total)
300 		*total = caps_total_count;
301 	if (avail)
302 		*avail = caps_avail_count;
303 	if (used)
304 		*used = caps_use_count;
305 	if (reserved)
306 		*reserved = caps_reserve_count;
307 	if (min)
308 		*min = caps_min_count;
309 }
310 
311 /*
312  * Find ceph_cap for given mds, if any.
313  *
314  * Called with i_lock held.
315  */
316 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
317 {
318 	struct ceph_cap *cap;
319 	struct rb_node *n = ci->i_caps.rb_node;
320 
321 	while (n) {
322 		cap = rb_entry(n, struct ceph_cap, ci_node);
323 		if (mds < cap->mds)
324 			n = n->rb_left;
325 		else if (mds > cap->mds)
326 			n = n->rb_right;
327 		else
328 			return cap;
329 	}
330 	return NULL;
331 }
332 
333 /*
334  * Return id of any MDS with a cap, preferring one that holds
335  * FILE_WR|FILE_BUFFER|FILE_EXCL caps; return -1 if there is none.
336  */
337 static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq)
338 {
339 	struct ceph_cap *cap;
340 	int mds = -1;
341 	struct rb_node *p;
342 
343 	/* prefer mds with WR|WRBUFFER|EXCL caps */
344 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
345 		cap = rb_entry(p, struct ceph_cap, ci_node);
346 		mds = cap->mds;
347 		if (mseq)
348 			*mseq = cap->mseq;
349 		if (cap->issued & (CEPH_CAP_FILE_WR |
350 				   CEPH_CAP_FILE_BUFFER |
351 				   CEPH_CAP_FILE_EXCL))
352 			break;
353 	}
354 	return mds;
355 }
356 
357 int ceph_get_cap_mds(struct inode *inode)
358 {
359 	int mds;
360 	spin_lock(&inode->i_lock);
361 	mds = __ceph_get_cap_mds(ceph_inode(inode), NULL);
362 	spin_unlock(&inode->i_lock);
363 	return mds;
364 }
365 
366 /*
367  * Called under i_lock.
368  */
369 static void __insert_cap_node(struct ceph_inode_info *ci,
370 			      struct ceph_cap *new)
371 {
372 	struct rb_node **p = &ci->i_caps.rb_node;
373 	struct rb_node *parent = NULL;
374 	struct ceph_cap *cap = NULL;
375 
376 	while (*p) {
377 		parent = *p;
378 		cap = rb_entry(parent, struct ceph_cap, ci_node);
379 		if (new->mds < cap->mds)
380 			p = &(*p)->rb_left;
381 		else if (new->mds > cap->mds)
382 			p = &(*p)->rb_right;
383 		else
384 			BUG();
385 	}
386 
387 	rb_link_node(&new->ci_node, parent, p);
388 	rb_insert_color(&new->ci_node, &ci->i_caps);
389 }
390 
391 /*
392  * (re)set cap hold timeouts, which control the delayed release
393  * of unused caps back to the MDS.  Should be called on cap use.
394  */
395 static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
396 			       struct ceph_inode_info *ci)
397 {
398 	struct ceph_mount_args *ma = mdsc->client->mount_args;
399 
400 	ci->i_hold_caps_min = round_jiffies(jiffies +
401 					    ma->caps_wanted_delay_min * HZ);
402 	ci->i_hold_caps_max = round_jiffies(jiffies +
403 					    ma->caps_wanted_delay_max * HZ);
404 	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
405 	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
406 }
407 
408 /*
409  * (Re)queue inode at the end of the delayed cap release list.
410  *
411  * If CEPH_I_FLUSH is set, leave the inode at the front of the list.
412  *
413  * Caller holds i_lock
414  *    -> we take mdsc->cap_delay_lock
415  */
416 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
417 				struct ceph_inode_info *ci)
418 {
419 	__cap_set_timeouts(mdsc, ci);
420 	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
421 	     ci->i_ceph_flags, ci->i_hold_caps_max);
422 	if (!mdsc->stopping) {
423 		spin_lock(&mdsc->cap_delay_lock);
424 		if (!list_empty(&ci->i_cap_delay_list)) {
425 			if (ci->i_ceph_flags & CEPH_I_FLUSH)
426 				goto no_change;
427 			list_del_init(&ci->i_cap_delay_list);
428 		}
429 		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
430 no_change:
431 		spin_unlock(&mdsc->cap_delay_lock);
432 	}
433 }
434 
435 /*
436  * Queue an inode for immediate writeback.  Mark inode with
437  * CEPH_I_FLUSH, indicating we should send a cap message to flush
438  * dirty metadata asap, and move it to the front of the delayed cap list.
439  */
440 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
441 				      struct ceph_inode_info *ci)
442 {
443 	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
444 	spin_lock(&mdsc->cap_delay_lock);
445 	ci->i_ceph_flags |= CEPH_I_FLUSH;
446 	if (!list_empty(&ci->i_cap_delay_list))
447 		list_del_init(&ci->i_cap_delay_list);
448 	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
449 	spin_unlock(&mdsc->cap_delay_lock);
450 }
451 
452 /*
453  * Cancel delayed work on cap.
454  *
455  * Caller must hold i_lock.
456  */
457 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
458 			       struct ceph_inode_info *ci)
459 {
460 	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
461 	if (list_empty(&ci->i_cap_delay_list))
462 		return;
463 	spin_lock(&mdsc->cap_delay_lock);
464 	list_del_init(&ci->i_cap_delay_list);
465 	spin_unlock(&mdsc->cap_delay_lock);
466 }
467 
468 /*
469  * Common issue checks for add_cap, handle_cap_grant.
470  */
471 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
472 			      unsigned issued)
473 {
474 	unsigned had = __ceph_caps_issued(ci, NULL);
475 
476 	/*
477 	 * Each time we receive FILE_CACHE anew, we increment
478 	 * i_rdcache_gen.
479 	 */
480 	if ((issued & CEPH_CAP_FILE_CACHE) &&
481 	    (had & CEPH_CAP_FILE_CACHE) == 0)
482 		ci->i_rdcache_gen++;
483 
484 	/*
485 	 * if we are newly issued FILE_SHARED, clear CEPH_I_COMPLETE; we
486 	 * don't know what happened to this directory while we didn't
487 	 * have the cap.
488 	 */
489 	if ((issued & CEPH_CAP_FILE_SHARED) &&
490 	    (had & CEPH_CAP_FILE_SHARED) == 0) {
491 		ci->i_shared_gen++;
492 		if (S_ISDIR(ci->vfs_inode.i_mode)) {
493 			dout(" marking %p NOT complete\n", &ci->vfs_inode);
494 			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
495 		}
496 	}
497 }
498 
499 /*
500  * Add a capability under the given MDS session.
501  *
502  * Caller should hold session snap_rwsem (read) and s_mutex.
503  *
504  * @fmode is the open file mode, if we are opening a file, otherwise
505  * it is < 0.  (This is so we can atomically add the cap and add an
506  * open file reference to it.)
507  */
508 int ceph_add_cap(struct inode *inode,
509 		 struct ceph_mds_session *session, u64 cap_id,
510 		 int fmode, unsigned issued, unsigned wanted,
511 		 unsigned seq, unsigned mseq, u64 realmino, int flags,
512 		 struct ceph_cap_reservation *caps_reservation)
513 {
514 	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
515 	struct ceph_inode_info *ci = ceph_inode(inode);
516 	struct ceph_cap *new_cap = NULL;
517 	struct ceph_cap *cap;
518 	int mds = session->s_mds;
519 	int actual_wanted;
520 
521 	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
522 	     session->s_mds, cap_id, ceph_cap_string(issued), seq);
523 
524 	/*
525 	 * If we are opening the file, include file mode wanted bits
526 	 * in wanted.
527 	 */
528 	if (fmode >= 0)
529 		wanted |= ceph_caps_for_mode(fmode);
530 
531 retry:
532 	spin_lock(&inode->i_lock);
533 	cap = __get_cap_for_mds(ci, mds);
534 	if (!cap) {
535 		if (new_cap) {
536 			cap = new_cap;
537 			new_cap = NULL;
538 		} else {
539 			spin_unlock(&inode->i_lock);
540 			new_cap = get_cap(caps_reservation);
541 			if (new_cap == NULL)
542 				return -ENOMEM;
543 			goto retry;
544 		}
545 
546 		cap->issued = 0;
547 		cap->implemented = 0;
548 		cap->mds = mds;
549 		cap->mds_wanted = 0;
550 
551 		cap->ci = ci;
552 		__insert_cap_node(ci, cap);
553 
554 		/* clear out old exporting info?  (i.e. on cap import) */
555 		if (ci->i_cap_exporting_mds == mds) {
556 			ci->i_cap_exporting_issued = 0;
557 			ci->i_cap_exporting_mseq = 0;
558 			ci->i_cap_exporting_mds = -1;
559 		}
560 
561 		/* add to session cap list */
562 		cap->session = session;
563 		spin_lock(&session->s_cap_lock);
564 		list_add_tail(&cap->session_caps, &session->s_caps);
565 		session->s_nr_caps++;
566 		spin_unlock(&session->s_cap_lock);
567 	}
568 
569 	if (!ci->i_snap_realm) {
570 		/*
571 		 * add this inode to the appropriate snap realm
572 		 */
573 		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
574 							       realmino);
575 		if (realm) {
576 			ceph_get_snap_realm(mdsc, realm);
577 			spin_lock(&realm->inodes_with_caps_lock);
578 			ci->i_snap_realm = realm;
579 			list_add(&ci->i_snap_realm_item,
580 				 &realm->inodes_with_caps);
581 			spin_unlock(&realm->inodes_with_caps_lock);
582 		} else {
583 			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
584 			       realmino);
585 		}
586 	}
587 
588 	__check_cap_issue(ci, cap, issued);
589 
590 	/*
591 	 * If we are issued caps we don't want, or the mds' wanted
592 	 * value appears to be off, queue a check so we'll release
593 	 * later and/or update the mds wanted value.
594 	 */
595 	actual_wanted = __ceph_caps_wanted(ci);
596 	if ((wanted & ~actual_wanted) ||
597 	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
598 		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
599 		     ceph_cap_string(issued), ceph_cap_string(wanted),
600 		     ceph_cap_string(actual_wanted));
601 		__cap_delay_requeue(mdsc, ci);
602 	}
603 
604 	if (flags & CEPH_CAP_FLAG_AUTH)
605 		ci->i_auth_cap = cap;
606 	else if (ci->i_auth_cap == cap)
607 		ci->i_auth_cap = NULL;
608 
609 	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
610 	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
611 	     ceph_cap_string(issued|cap->issued), seq, mds);
612 	cap->cap_id = cap_id;
613 	cap->issued = issued;
614 	cap->implemented |= issued;
615 	cap->mds_wanted |= wanted;
616 	cap->seq = seq;
617 	cap->issue_seq = seq;
618 	cap->mseq = mseq;
619 	cap->cap_gen = session->s_cap_gen;
620 
621 	if (fmode >= 0)
622 		__ceph_get_fmode(ci, fmode);
623 	spin_unlock(&inode->i_lock);
624 	wake_up(&ci->i_cap_wq);
625 	return 0;
626 }
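
/*
 * A hypothetical call illustrating the fmode contract above (the real
 * callers live in the MDS reply handling): pass the open file mode so
 * the cap and the open-file reference are taken atomically,
 *
 *	ceph_add_cap(inode, session, cap_id, CEPH_FILE_MODE_RD,
 *		     issued, wanted, seq, mseq, realmino,
 *		     CEPH_CAP_FLAG_AUTH, caps_reservation);
 *
 * or pass fmode = -1 when no file is being opened.
 */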
627 
628 /*
629  * Return true if cap has not timed out and belongs to the current
630  * generation of the MDS session (i.e. has not gone 'stale' due to
631  * us losing touch with the mds).
632  */
633 static int __cap_is_valid(struct ceph_cap *cap)
634 {
635 	unsigned long ttl;
636 	u32 gen;
637 
638 	spin_lock(&cap->session->s_cap_lock);
639 	gen = cap->session->s_cap_gen;
640 	ttl = cap->session->s_cap_ttl;
641 	spin_unlock(&cap->session->s_cap_lock);
642 
643 	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
644 		dout("__cap_is_valid %p cap %p issued %s "
645 		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
646 		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
647 		return 0;
648 	}
649 
650 	return 1;
651 }
652 
653 /*
654  * Return set of valid cap bits issued to us.  Note that caps time
655  * out, and may be invalidated in bulk if the client session times out
656  * and session->s_cap_gen is bumped.
657  */
658 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
659 {
660 	int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
661 	struct ceph_cap *cap;
662 	struct rb_node *p;
663 
664 	if (implemented)
665 		*implemented = 0;
666 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
667 		cap = rb_entry(p, struct ceph_cap, ci_node);
668 		if (!__cap_is_valid(cap))
669 			continue;
670 		dout("__ceph_caps_issued %p cap %p issued %s\n",
671 		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
672 		have |= cap->issued;
673 		if (implemented)
674 			*implemented |= cap->implemented;
675 	}
676 	return have;
677 }
678 
679 /*
680  * Get cap bits issued by caps other than @ocap
681  */
682 int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
683 {
684 	int have = ci->i_snap_caps;
685 	struct ceph_cap *cap;
686 	struct rb_node *p;
687 
688 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
689 		cap = rb_entry(p, struct ceph_cap, ci_node);
690 		if (cap == ocap)
691 			continue;
692 		if (!__cap_is_valid(cap))
693 			continue;
694 		have |= cap->issued;
695 	}
696 	return have;
697 }
698 
699 /*
700  * Move a cap to the end of the LRU (oldest caps at list head, newest
701  * at list tail).
702  */
703 static void __touch_cap(struct ceph_cap *cap)
704 {
705 	struct ceph_mds_session *s = cap->session;
706 
707 	spin_lock(&s->s_cap_lock);
708 	if (s->s_cap_iterator == NULL) {
709 		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
710 		     s->s_mds);
711 		list_move_tail(&cap->session_caps, &s->s_caps);
712 	} else {
713 		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
714 		     &cap->ci->vfs_inode, cap, s->s_mds);
715 	}
716 	spin_unlock(&s->s_cap_lock);
717 }
718 
719 /*
720  * Check if we hold the given mask.  If so, move the cap(s) to the
721  * tail of their respective LRUs.  (This is the preferred way for
722  * callers to check for caps they want.)
723  */
724 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
725 {
726 	struct ceph_cap *cap;
727 	struct rb_node *p;
728 	int have = ci->i_snap_caps;
729 
730 	if ((have & mask) == mask) {
731 		dout("__ceph_caps_issued_mask %p snap issued %s"
732 		     " (mask %s)\n", &ci->vfs_inode,
733 		     ceph_cap_string(have),
734 		     ceph_cap_string(mask));
735 		return 1;
736 	}
737 
738 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
739 		cap = rb_entry(p, struct ceph_cap, ci_node);
740 		if (!__cap_is_valid(cap))
741 			continue;
742 		if ((cap->issued & mask) == mask) {
743 			dout("__ceph_caps_issued_mask %p cap %p issued %s"
744 			     " (mask %s)\n", &ci->vfs_inode, cap,
745 			     ceph_cap_string(cap->issued),
746 			     ceph_cap_string(mask));
747 			if (touch)
748 				__touch_cap(cap);
749 			return 1;
750 		}
751 
752 		/* does a combination of caps satisfy mask? */
753 		have |= cap->issued;
754 		if ((have & mask) == mask) {
755 			dout("__ceph_caps_issued_mask %p combo issued %s"
756 			     " (mask %s)\n", &ci->vfs_inode,
757 			     ceph_cap_string(cap->issued),
758 			     ceph_cap_string(mask));
759 			if (touch) {
760 				struct rb_node *q;
761 
762 				/* touch this + preceding caps */
763 				__touch_cap(cap);
764 				for (q = rb_first(&ci->i_caps); q != p;
765 				     q = rb_next(q)) {
766 					cap = rb_entry(q, struct ceph_cap,
767 						       ci_node);
768 					if (!__cap_is_valid(cap))
769 						continue;
770 					__touch_cap(cap);
771 				}
772 			}
773 			return 1;
774 		}
775 	}
776 
777 	return 0;
778 }
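
/*
 * A usage sketch for the mask check above (hypothetical caller; the
 * real ones are in the read/write paths): verify we hold Fc before
 * trusting cached pages, touching the LRU on success:
 *
 *	spin_lock(&inode->i_lock);
 *	ok = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_CACHE, 1);
 *	spin_unlock(&inode->i_lock);
 */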
779 
780 /*
781  * Return true if mask caps are currently being revoked by an MDS.
782  */
783 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
784 {
785 	struct inode *inode = &ci->vfs_inode;
786 	struct ceph_cap *cap;
787 	struct rb_node *p;
788 	int ret = 0;
789 
790 	spin_lock(&inode->i_lock);
791 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
792 		cap = rb_entry(p, struct ceph_cap, ci_node);
793 		if (__cap_is_valid(cap) &&
794 		    (cap->implemented & ~cap->issued & mask)) {
795 			ret = 1;
796 			break;
797 		}
798 	}
799 	spin_unlock(&inode->i_lock);
800 	dout("ceph_caps_revoking %p %s = %d\n", inode,
801 	     ceph_cap_string(mask), ret);
802 	return ret;
803 }
804 
805 int __ceph_caps_used(struct ceph_inode_info *ci)
806 {
807 	int used = 0;
808 	if (ci->i_pin_ref)
809 		used |= CEPH_CAP_PIN;
810 	if (ci->i_rd_ref)
811 		used |= CEPH_CAP_FILE_RD;
812 	if (ci->i_rdcache_ref || ci->i_rdcache_gen)
813 		used |= CEPH_CAP_FILE_CACHE;
814 	if (ci->i_wr_ref)
815 		used |= CEPH_CAP_FILE_WR;
816 	if (ci->i_wrbuffer_ref)
817 		used |= CEPH_CAP_FILE_BUFFER;
818 	return used;
819 }
820 
821 /*
822  * Return caps wanted by virtue of open file modes.
823  */
824 int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
825 {
826 	int want = 0;
827 	int mode;
828 	for (mode = 0; mode < 4; mode++)
829 		if (ci->i_nr_by_mode[mode])
830 			want |= ceph_caps_for_mode(mode);
831 	return want;
832 }
833 
834 /*
835  * Return caps we have registered with the MDS(s) as 'wanted'.
836  */
837 int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
838 {
839 	struct ceph_cap *cap;
840 	struct rb_node *p;
841 	int mds_wanted = 0;
842 
843 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
844 		cap = rb_entry(p, struct ceph_cap, ci_node);
845 		if (!__cap_is_valid(cap))
846 			continue;
847 		mds_wanted |= cap->mds_wanted;
848 	}
849 	return mds_wanted;
850 }
851 
852 /*
853  * called under i_lock
854  */
855 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
856 {
857 	return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
858 }
859 
860 /*
861  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
862  *
863  * caller should hold i_lock.
864  * caller will not hold session s_mutex if called from destroy_inode.
865  */
866 void __ceph_remove_cap(struct ceph_cap *cap)
867 {
868 	struct ceph_mds_session *session = cap->session;
869 	struct ceph_inode_info *ci = cap->ci;
870 	struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
871 	int removed = 0;
872 
873 	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
874 
875 	/* remove from session list */
876 	spin_lock(&session->s_cap_lock);
877 	if (session->s_cap_iterator == cap) {
878 		/* not yet, we are iterating over this very cap */
879 		dout("__ceph_remove_cap  delaying %p removal from session %p\n",
880 		     cap, cap->session);
881 	} else {
882 		list_del_init(&cap->session_caps);
883 		session->s_nr_caps--;
884 		cap->session = NULL;
885 		removed = 1;
886 	}
887 	/* protect backpointer with s_cap_lock: see iterate_session_caps */
888 	cap->ci = NULL;
889 	spin_unlock(&session->s_cap_lock);
890 
891 	/* remove from inode list */
892 	rb_erase(&cap->ci_node, &ci->i_caps);
893 	if (ci->i_auth_cap == cap)
894 		ci->i_auth_cap = NULL;
895 
896 	if (removed)
897 		ceph_put_cap(cap);
898 
899 	if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
900 		struct ceph_snap_realm *realm = ci->i_snap_realm;
901 		spin_lock(&realm->inodes_with_caps_lock);
902 		list_del_init(&ci->i_snap_realm_item);
903 		ci->i_snap_realm_counter++;
904 		ci->i_snap_realm = NULL;
905 		spin_unlock(&realm->inodes_with_caps_lock);
906 		ceph_put_snap_realm(mdsc, realm);
907 	}
908 	if (!__ceph_is_any_real_caps(ci))
909 		__cap_delay_cancel(mdsc, ci);
910 }
911 
912 /*
913  * Build and send a cap message to the given MDS.
914  *
915  * Caller should be holding s_mutex.
916  */
917 static int send_cap_msg(struct ceph_mds_session *session,
918 			u64 ino, u64 cid, int op,
919 			int caps, int wanted, int dirty,
920 			u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
921 			u64 size, u64 max_size,
922 			struct timespec *mtime, struct timespec *atime,
923 			u64 time_warp_seq,
924 			uid_t uid, gid_t gid, mode_t mode,
925 			u64 xattr_version,
926 			struct ceph_buffer *xattrs_buf,
927 			u64 follows)
928 {
929 	struct ceph_mds_caps *fc;
930 	struct ceph_msg *msg;
931 
932 	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
933 	     " seq %u/%u mseq %u follows %lld size %llu/%llu"
934 	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
935 	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
936 	     ceph_cap_string(dirty),
937 	     seq, issue_seq, mseq, follows, size, max_size,
938 	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
939 
940 	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL);
941 	if (IS_ERR(msg))
942 		return PTR_ERR(msg);
943 
944 	msg->hdr.tid = cpu_to_le64(flush_tid);
945 
946 	fc = msg->front.iov_base;
947 	memset(fc, 0, sizeof(*fc));
948 
949 	fc->cap_id = cpu_to_le64(cid);
950 	fc->op = cpu_to_le32(op);
951 	fc->seq = cpu_to_le32(seq);
952 	fc->issue_seq = cpu_to_le32(issue_seq);
953 	fc->migrate_seq = cpu_to_le32(mseq);
954 	fc->caps = cpu_to_le32(caps);
955 	fc->wanted = cpu_to_le32(wanted);
956 	fc->dirty = cpu_to_le32(dirty);
957 	fc->ino = cpu_to_le64(ino);
958 	fc->snap_follows = cpu_to_le64(follows);
959 
960 	fc->size = cpu_to_le64(size);
961 	fc->max_size = cpu_to_le64(max_size);
962 	if (mtime)
963 		ceph_encode_timespec(&fc->mtime, mtime);
964 	if (atime)
965 		ceph_encode_timespec(&fc->atime, atime);
966 	fc->time_warp_seq = cpu_to_le32(time_warp_seq);
967 
968 	fc->uid = cpu_to_le32(uid);
969 	fc->gid = cpu_to_le32(gid);
970 	fc->mode = cpu_to_le32(mode);
971 
972 	fc->xattr_version = cpu_to_le64(xattr_version);
973 	if (xattrs_buf) {
974 		msg->middle = ceph_buffer_get(xattrs_buf);
975 		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
976 		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
977 	}
978 
979 	ceph_con_send(&session->s_con, msg);
980 	return 0;
981 }
982 
983 /*
984  * Queue cap releases when an inode is dropped from our cache.  Since
985  * the inode is about to be destroyed, there is no need for i_lock.
986  */
987 void ceph_queue_caps_release(struct inode *inode)
988 {
989 	struct ceph_inode_info *ci = ceph_inode(inode);
990 	struct rb_node *p;
991 
992 	p = rb_first(&ci->i_caps);
993 	while (p) {
994 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
995 		struct ceph_mds_session *session = cap->session;
996 		struct ceph_msg *msg;
997 		struct ceph_mds_cap_release *head;
998 		struct ceph_mds_cap_item *item;
999 
1000 		spin_lock(&session->s_cap_lock);
1001 		BUG_ON(!session->s_num_cap_releases);
1002 		msg = list_first_entry(&session->s_cap_releases,
1003 				       struct ceph_msg, list_head);
1004 
1005 		dout(" adding %p release to mds%d msg %p (%d left)\n",
1006 		     inode, session->s_mds, msg, session->s_num_cap_releases);
1007 
1008 		BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
1009 		head = msg->front.iov_base;
1010 		head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
1011 		item = msg->front.iov_base + msg->front.iov_len;
1012 		item->ino = cpu_to_le64(ceph_ino(inode));
1013 		item->cap_id = cpu_to_le64(cap->cap_id);
1014 		item->migrate_seq = cpu_to_le32(cap->mseq);
1015 		item->seq = cpu_to_le32(cap->issue_seq);
1016 
1017 		session->s_num_cap_releases--;
1018 
1019 		msg->front.iov_len += sizeof(*item);
1020 		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1021 			dout(" release msg %p full\n", msg);
1022 			list_move_tail(&msg->list_head,
1023 				       &session->s_cap_releases_done);
1024 		} else {
1025 			dout(" release msg %p at %d/%d (%d)\n", msg,
1026 			     (int)le32_to_cpu(head->num),
1027 			     (int)CEPH_CAPS_PER_RELEASE,
1028 			     (int)msg->front.iov_len);
1029 		}
1030 		spin_unlock(&session->s_cap_lock);
1031 		p = rb_next(p);
1032 		__ceph_remove_cap(cap);
1033 	}
1034 }
1035 
1036 /*
1037  * Send a cap msg on the given inode.  Update our caps state, then
1038  * drop i_lock and send the message.
1039  *
1040  * Make note of the max_size reported/requested from the mds, and of
1041  * revoked caps that have now been implemented.
1042  *
1043  * Make a half-hearted attempt to invalidate page cache if we are
1044  * dropping RDCACHE.  Note that this will leave behind locked pages
1045  * that we'll then need to deal with elsewhere.
1046  *
1047  * Return non-zero if we delayed the release, or experienced an error
1048  * such that the caller should requeue + retry later.
1049  *
1050  * called with i_lock, then drops it.
1051  * caller should hold snap_rwsem (read), s_mutex.
1052  */
1053 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1054 		      int op, int used, int want, int retain, int flushing,
1055 		      unsigned *pflush_tid)
1056 	__releases(cap->ci->vfs_inode->i_lock)
1057 {
1058 	struct ceph_inode_info *ci = cap->ci;
1059 	struct inode *inode = &ci->vfs_inode;
1060 	u64 cap_id = cap->cap_id;
1061 	int held, revoking, dropping, keep;
1062 	u64 seq, issue_seq, mseq, time_warp_seq, follows;
1063 	u64 size, max_size;
1064 	struct timespec mtime, atime;
1065 	int wake = 0;
1066 	mode_t mode;
1067 	uid_t uid;
1068 	gid_t gid;
1069 	struct ceph_mds_session *session;
1070 	u64 xattr_version = 0;
1071 	int delayed = 0;
1072 	u64 flush_tid = 0;
1073 	int i;
1074 	int ret;
1075 
1076 	held = cap->issued | cap->implemented;
1077 	revoking = cap->implemented & ~cap->issued;
1078 	retain &= ~revoking;
1079 	dropping = cap->issued & ~retain;
1080 
1081 	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
1082 	     inode, cap, cap->session,
1083 	     ceph_cap_string(held), ceph_cap_string(held & retain),
1084 	     ceph_cap_string(revoking));
1085 	BUG_ON((retain & CEPH_CAP_PIN) == 0);
1086 
1087 	session = cap->session;
1088 
1089 	/* don't release wanted unless we've waited a bit. */
1090 	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1091 	    time_before(jiffies, ci->i_hold_caps_min)) {
1092 		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
1093 		     ceph_cap_string(cap->issued),
1094 		     ceph_cap_string(cap->issued & retain),
1095 		     ceph_cap_string(cap->mds_wanted),
1096 		     ceph_cap_string(want));
1097 		want |= cap->mds_wanted;
1098 		retain |= cap->issued;
1099 		delayed = 1;
1100 	}
1101 	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
1102 
1103 	cap->issued &= retain;  /* drop bits we don't want */
1104 	if (cap->implemented & ~cap->issued) {
1105 		/*
1106 		 * Wake up any waiters on wanted -> needed transition.
1107 		 * This is due to the weird transition from buffered
1108 		 * to sync IO... we need to flush dirty pages _before_
1109 		 * allowing sync writes to avoid reordering.
1110 		 */
1111 		wake = 1;
1112 	}
1113 	cap->implemented &= cap->issued | used;
1114 	cap->mds_wanted = want;
1115 
1116 	if (flushing) {
1117 		/*
1118 		 * assign a tid for flush operations so we can avoid
1119 		 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
1120 		 * clean type races.  track latest tid for every bit
1121 		 * clean type races.  Track the latest tid for every bit
1122 		 * first ack clean Ax.
1123 		 */
1124 		flush_tid = ++ci->i_cap_flush_last_tid;
1125 		if (pflush_tid)
1126 			*pflush_tid = flush_tid;
1127 		dout(" cap_flush_tid %d\n", (int)flush_tid);
1128 		for (i = 0; i < CEPH_CAP_BITS; i++)
1129 			if (flushing & (1 << i))
1130 				ci->i_cap_flush_tid[i] = flush_tid;
1131 	}
1132 
1133 	keep = cap->implemented;
1134 	seq = cap->seq;
1135 	issue_seq = cap->issue_seq;
1136 	mseq = cap->mseq;
1137 	size = inode->i_size;
1138 	ci->i_reported_size = size;
1139 	max_size = ci->i_wanted_max_size;
1140 	ci->i_requested_max_size = max_size;
1141 	mtime = inode->i_mtime;
1142 	atime = inode->i_atime;
1143 	time_warp_seq = ci->i_time_warp_seq;
1144 	follows = ci->i_snap_realm->cached_context->seq;
1145 	uid = inode->i_uid;
1146 	gid = inode->i_gid;
1147 	mode = inode->i_mode;
1148 
1149 	if (dropping & CEPH_CAP_XATTR_EXCL) {
1150 		__ceph_build_xattrs_blob(ci);
1151 		xattr_version = ci->i_xattrs.version + 1;
1152 	}
1153 
1154 	spin_unlock(&inode->i_lock);
1155 
1156 	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1157 		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
1158 		size, max_size, &mtime, &atime, time_warp_seq,
1159 		uid, gid, mode,
1160 		xattr_version,
1161 		(flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
1162 		follows);
1163 	if (ret < 0) {
1164 		dout("error sending cap msg, must requeue %p\n", inode);
1165 		delayed = 1;
1166 	}
1167 
1168 	if (wake)
1169 		wake_up(&ci->i_cap_wq);
1170 
1171 	return delayed;
1172 }
1173 
1174 /*
1175  * When a snapshot is taken, clients accumulate dirty metadata on
1176  * inodes with capabilities in ceph_cap_snaps to describe the file
1177  * state at the time the snapshot was taken.  This must be flushed
1178  * asynchronously back to the MDS once sync writes complete and dirty
1179  * data is written out.
1180  *
1181  * Called under i_lock.  Takes s_mutex as needed.
1182  */
1183 void __ceph_flush_snaps(struct ceph_inode_info *ci,
1184 			struct ceph_mds_session **psession)
1185 {
1186 	struct inode *inode = &ci->vfs_inode;
1187 	int mds;
1188 	struct ceph_cap_snap *capsnap;
1189 	u32 mseq;
1190 	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
1191 	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1192 						    session->s_mutex */
1193 	u64 next_follows = 0;  /* keep track of how far we've gotten through the
1194 			     i_cap_snaps list, and skip these entries next time
1195 			     around to avoid an infinite loop */
1196 
1197 	if (psession)
1198 		session = *psession;
1199 
1200 	dout("__flush_snaps %p\n", inode);
1201 retry:
1202 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
1203 		/* avoid an infinite loop after retry */
1204 		if (capsnap->follows < next_follows)
1205 			continue;
1206 		/*
1207 		 * we need to wait for sync writes to complete and for dirty
1208 		 * pages to be written out.
1209 		 */
1210 		if (capsnap->dirty_pages || capsnap->writing)
1211 			continue;
1212 
1213 		/*
1214 		 * if cap writeback already occurred, we should have dropped
1215 		 * the capsnap in ceph_put_wrbuffer_cap_refs.
1216 		 */
1217 		BUG_ON(capsnap->dirty == 0);
1218 
1219 		/* pick mds, take s_mutex */
1220 		mds = __ceph_get_cap_mds(ci, &mseq);
1221 		if (session && session->s_mds != mds) {
1222 			dout("oops, wrong session %p mutex\n", session);
1223 			mutex_unlock(&session->s_mutex);
1224 			ceph_put_mds_session(session);
1225 			session = NULL;
1226 		}
1227 		if (!session) {
1228 			spin_unlock(&inode->i_lock);
1229 			mutex_lock(&mdsc->mutex);
1230 			session = __ceph_lookup_mds_session(mdsc, mds);
1231 			mutex_unlock(&mdsc->mutex);
1232 			if (session) {
1233 				dout("inverting session/ino locks on %p\n",
1234 				     session);
1235 				mutex_lock(&session->s_mutex);
1236 			}
1237 			/*
1238 			 * if session == NULL, we raced against a cap
1239 			 * deletion.  retry, and we'll get a better
1240 			 * @mds value next time.
1241 			 */
1242 			spin_lock(&inode->i_lock);
1243 			goto retry;
1244 		}
1245 
1246 		capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
1247 		atomic_inc(&capsnap->nref);
1248 		if (!list_empty(&capsnap->flushing_item))
1249 			list_del_init(&capsnap->flushing_item);
1250 		list_add_tail(&capsnap->flushing_item,
1251 			      &session->s_cap_snaps_flushing);
1252 		spin_unlock(&inode->i_lock);
1253 
1254 		dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
1255 		     inode, capsnap, next_follows, capsnap->size);
1256 		send_cap_msg(session, ceph_vino(inode).ino, 0,
1257 			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1258 			     capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
1259 			     capsnap->size, 0,
1260 			     &capsnap->mtime, &capsnap->atime,
1261 			     capsnap->time_warp_seq,
1262 			     capsnap->uid, capsnap->gid, capsnap->mode,
1263 			     0, NULL,
1264 			     capsnap->follows);
1265 
1266 		next_follows = capsnap->follows + 1;
1267 		ceph_put_cap_snap(capsnap);
1268 
1269 		spin_lock(&inode->i_lock);
1270 		goto retry;
1271 	}
1272 
1273 	/* we flushed them all; remove this inode from the queue */
1274 	spin_lock(&mdsc->snap_flush_lock);
1275 	list_del_init(&ci->i_snap_flush_item);
1276 	spin_unlock(&mdsc->snap_flush_lock);
1277 
1278 	if (psession)
1279 		*psession = session;
1280 	else if (session) {
1281 		mutex_unlock(&session->s_mutex);
1282 		ceph_put_mds_session(session);
1283 	}
1284 }
1285 
1286 static void ceph_flush_snaps(struct ceph_inode_info *ci)
1287 {
1288 	struct inode *inode = &ci->vfs_inode;
1289 
1290 	spin_lock(&inode->i_lock);
1291 	__ceph_flush_snaps(ci, NULL);
1292 	spin_unlock(&inode->i_lock);
1293 }
1294 
1295 /*
1296  * Mark caps dirty.  If inode is newly dirty, add to the global dirty
1297  * list.
1298  */
1299 void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1300 {
1301 	struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
1302 	struct inode *inode = &ci->vfs_inode;
1303 	int was = ci->i_dirty_caps;
1304 	int dirty = 0;
1305 
1306 	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
1307 	     ceph_cap_string(mask), ceph_cap_string(was),
1308 	     ceph_cap_string(was | mask));
1309 	ci->i_dirty_caps |= mask;
1310 	if (was == 0) {
1311 		dout(" inode %p now dirty\n", &ci->vfs_inode);
1312 		BUG_ON(!list_empty(&ci->i_dirty_item));
1313 		spin_lock(&mdsc->cap_dirty_lock);
1314 		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1315 		spin_unlock(&mdsc->cap_dirty_lock);
1316 		if (ci->i_flushing_caps == 0) {
1317 			igrab(inode);
1318 			dirty |= I_DIRTY_SYNC;
1319 		}
1320 	}
1321 	BUG_ON(list_empty(&ci->i_dirty_item));
1322 	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1323 	    (mask & CEPH_CAP_FILE_BUFFER))
1324 		dirty |= I_DIRTY_DATASYNC;
1325 	if (dirty)
1326 		__mark_inode_dirty(inode, dirty);
1327 	__cap_delay_requeue(mdsc, ci);
1328 }
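
/*
 * A sketch of the dirtying pattern above (hypothetical setattr-style
 * caller): change an attribute covered by an EXCL cap under i_lock,
 * then record the dirty bit so the delayed work (or fsync) flushes it:
 *
 *	spin_lock(&inode->i_lock);
 *	inode->i_mode = new_mode;
 *	__ceph_mark_dirty_caps(ci, CEPH_CAP_AUTH_EXCL);
 *	spin_unlock(&inode->i_lock);
 */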
1329 
1330 /*
1331  * Add dirty inode to the flushing list.  Assigned a seq number so we
1332  * can wait for caps to flush without starving.
1333  *
1334  * Called under i_lock.
1335  */
1336 static int __mark_caps_flushing(struct inode *inode,
1337 				 struct ceph_mds_session *session)
1338 {
1339 	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
1340 	struct ceph_inode_info *ci = ceph_inode(inode);
1341 	int flushing;
1342 
1343 	BUG_ON(ci->i_dirty_caps == 0);
1344 	BUG_ON(list_empty(&ci->i_dirty_item));
1345 
1346 	flushing = ci->i_dirty_caps;
1347 	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1348 	     ceph_cap_string(flushing),
1349 	     ceph_cap_string(ci->i_flushing_caps),
1350 	     ceph_cap_string(ci->i_flushing_caps | flushing));
1351 	ci->i_flushing_caps |= flushing;
1352 	ci->i_dirty_caps = 0;
1353 	dout(" inode %p now !dirty\n", inode);
1354 
1355 	spin_lock(&mdsc->cap_dirty_lock);
1356 	list_del_init(&ci->i_dirty_item);
1357 
1358 	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
1359 	if (list_empty(&ci->i_flushing_item)) {
1360 		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1361 		mdsc->num_cap_flushing++;
1362 		dout(" inode %p now flushing seq %lld\n", inode,
1363 		     ci->i_cap_flush_seq);
1364 	} else {
1365 		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1366 		dout(" inode %p now flushing (more) seq %lld\n", inode,
1367 		     ci->i_cap_flush_seq);
1368 	}
1369 	spin_unlock(&mdsc->cap_dirty_lock);
1370 
1371 	return flushing;
1372 }
1373 
1374 /*
1375  * try to invalidate mapping pages without blocking.
1376  */
1377 static int mapping_is_empty(struct address_space *mapping)
1378 {
1379 	struct page *page = find_get_page(mapping, 0);
1380 
1381 	if (!page)
1382 		return 1;
1383 
1384 	put_page(page);
1385 	return 0;
1386 }
1387 
1388 static int try_nonblocking_invalidate(struct inode *inode)
1389 {
1390 	struct ceph_inode_info *ci = ceph_inode(inode);
1391 	u32 invalidating_gen = ci->i_rdcache_gen;
1392 
1393 	spin_unlock(&inode->i_lock);
1394 	invalidate_mapping_pages(&inode->i_data, 0, -1);
1395 	spin_lock(&inode->i_lock);
1396 
1397 	if (mapping_is_empty(&inode->i_data) &&
1398 	    invalidating_gen == ci->i_rdcache_gen) {
1399 		/* success. */
1400 		dout("try_nonblocking_invalidate %p success\n", inode);
1401 		ci->i_rdcache_gen = 0;
1402 		ci->i_rdcache_revoking = 0;
1403 		return 0;
1404 	}
1405 	dout("try_nonblocking_invalidate %p failed\n", inode);
1406 	return -1;
1407 }
1408 
1409 /*
1410  * Swiss army knife function to examine currently used and wanted
1411  * versus held caps.  Release, flush, ack revoked caps to mds as
1412  * appropriate.
1413  *
1414  *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1415  *    cap release further.
1416  *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
1417  *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1418  *    further delay.
1419  */
1420 void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1421 		     struct ceph_mds_session *session)
1422 	__releases(session->s_mutex)
1423 {
1424 	struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
1425 	struct ceph_mds_client *mdsc = &client->mdsc;
1426 	struct inode *inode = &ci->vfs_inode;
1427 	struct ceph_cap *cap;
1428 	int file_wanted, used;
1429 	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
1430 	int issued, implemented, want, retain, revoking, flushing = 0;
1431 	int mds = -1;   /* keep track of how far we've gone through i_caps list
1432 			   to avoid an infinite loop on retry */
1433 	struct rb_node *p;
1434 	int tried_invalidate = 0;
1435 	int delayed = 0, sent = 0, force_requeue = 0, num;
1436 	int queue_invalidate = 0;
1437 	int is_delayed = flags & CHECK_CAPS_NODELAY;
1438 
1439 	/* if we are unmounting, flush any unused caps immediately. */
1440 	if (mdsc->stopping)
1441 		is_delayed = 1;
1442 
1443 	spin_lock(&inode->i_lock);
1444 
1445 	if (ci->i_ceph_flags & CEPH_I_FLUSH)
1446 		flags |= CHECK_CAPS_FLUSH;
1447 
1448 	/* flush snaps first time around only */
1449 	if (!list_empty(&ci->i_cap_snaps))
1450 		__ceph_flush_snaps(ci, &session);
1451 	goto retry_locked;
1452 retry:
1453 	spin_lock(&inode->i_lock);
1454 retry_locked:
1455 	file_wanted = __ceph_caps_file_wanted(ci);
1456 	used = __ceph_caps_used(ci);
1457 	want = file_wanted | used;
1458 	issued = __ceph_caps_issued(ci, &implemented);
1459 	revoking = implemented & ~issued;
1460 
1461 	retain = want | CEPH_CAP_PIN;
1462 	if (!mdsc->stopping && inode->i_nlink > 0) {
1463 		if (want) {
1464 			retain |= CEPH_CAP_ANY;       /* be greedy */
1465 		} else {
1466 			retain |= CEPH_CAP_ANY_SHARED;
1467 			/*
1468 			 * keep RD only if we didn't have the file open RW,
1469 			 * because then the mds would revoke it anyway to
1470 			 * journal max_size=0.
1471 			 */
1472 			if (ci->i_max_size == 0)
1473 				retain |= CEPH_CAP_ANY_RD;
1474 		}
1475 	}
1476 
1477 	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1478 	     " issued %s revoking %s retain %s %s%s%s\n", inode,
1479 	     ceph_cap_string(file_wanted),
1480 	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1481 	     ceph_cap_string(ci->i_flushing_caps),
1482 	     ceph_cap_string(issued), ceph_cap_string(revoking),
1483 	     ceph_cap_string(retain),
1484 	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1485 	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1486 	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1487 
1488 	/*
1489 	 * If we no longer need to hold onto our old caps, and we may
1490 	 * have cached pages, but don't want them, then try to invalidate.
1491 	 * If we fail, it's because pages are locked.... try again later.
1492 	 */
1493 	if ((!is_delayed || mdsc->stopping) &&
1494 	    ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
1495 	    ci->i_rdcache_gen &&                     /* may have cached pages */
1496 	    (file_wanted == 0 ||                     /* no open files */
1497 	     (revoking & CEPH_CAP_FILE_CACHE)) &&     /*  or revoking cache */
1498 	    !tried_invalidate) {
1499 		dout("check_caps trying to invalidate on %p\n", inode);
1500 		if (try_nonblocking_invalidate(inode) < 0) {
1501 			if (revoking & CEPH_CAP_FILE_CACHE) {
1502 				dout("check_caps queuing invalidate\n");
1503 				queue_invalidate = 1;
1504 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
1505 			} else {
1506 				dout("check_caps failed to invalidate pages\n");
1507 				/* we failed to invalidate pages.  check these
1508 				   caps again later. */
1509 				force_requeue = 1;
1510 				__cap_set_timeouts(mdsc, ci);
1511 			}
1512 		}
1513 		tried_invalidate = 1;
1514 		goto retry_locked;
1515 	}
1516 
1517 	num = 0;
1518 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1519 		cap = rb_entry(p, struct ceph_cap, ci_node);
1520 		num++;
1521 
1522 		/* avoid looping forever */
1523 		if (mds >= cap->mds ||
1524 		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1525 			continue;
1526 
1527 		/* NOTE: no side-effects allowed, until we take s_mutex */
1528 
1529 		revoking = cap->implemented & ~cap->issued;
1530 		if (revoking)
1531 			dout(" mds%d revoking %s\n", cap->mds,
1532 			     ceph_cap_string(revoking));
1533 
1534 		if (cap == ci->i_auth_cap &&
1535 		    (cap->issued & CEPH_CAP_FILE_WR)) {
1536 			/* request larger max_size from MDS? */
1537 			if (ci->i_wanted_max_size > ci->i_max_size &&
1538 			    ci->i_wanted_max_size > ci->i_requested_max_size) {
1539 				dout("requesting new max_size\n");
1540 				goto ack;
1541 			}
1542 
1543 			/* approaching max_size? */
1544 			if ((inode->i_size << 1) >= ci->i_max_size &&
1545 			    (ci->i_reported_size << 1) < ci->i_max_size) {
1546 				dout("i_size approaching max_size\n");
1547 				goto ack;
1548 			}
1549 		}
1550 		/* flush anything dirty? */
1551 		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1552 		    ci->i_dirty_caps) {
1553 			dout("flushing dirty caps\n");
1554 			goto ack;
1555 		}
1556 
1557 		/* completed revocation? (no revoked caps still in use) */
1558 		if (revoking && (revoking & used) == 0) {
1559 			dout("completed revocation of %s\n",
1560 			     ceph_cap_string(cap->implemented & ~cap->issued));
1561 			goto ack;
1562 		}
1563 
1564 		/* want more caps from mds? */
1565 		if (want & ~(cap->mds_wanted | cap->issued))
1566 			goto ack;
1567 
1568 		/* things we might delay */
1569 		if ((cap->issued & ~retain) == 0 &&
1570 		    cap->mds_wanted == want)
1571 			continue;     /* nope, all good */
1572 
1573 		if (is_delayed)
1574 			goto ack;
1575 
1576 		/* delay? */
1577 		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1578 		    time_before(jiffies, ci->i_hold_caps_max)) {
1579 			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1580 			     ceph_cap_string(cap->issued),
1581 			     ceph_cap_string(cap->issued & retain),
1582 			     ceph_cap_string(cap->mds_wanted),
1583 			     ceph_cap_string(want));
1584 			delayed++;
1585 			continue;
1586 		}
1587 
1588 ack:
1589 		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1590 			dout(" skipping %p I_NOFLUSH set\n", inode);
1591 			continue;
1592 		}
1593 
1594 		if (session && session != cap->session) {
1595 			dout("oops, wrong session %p mutex\n", session);
1596 			mutex_unlock(&session->s_mutex);
1597 			session = NULL;
1598 		}
1599 		if (!session) {
1600 			session = cap->session;
1601 			if (mutex_trylock(&session->s_mutex) == 0) {
1602 				dout("inverting session/ino locks on %p\n",
1603 				     session);
1604 				spin_unlock(&inode->i_lock);
1605 				if (took_snap_rwsem) {
1606 					up_read(&mdsc->snap_rwsem);
1607 					took_snap_rwsem = 0;
1608 				}
1609 				mutex_lock(&session->s_mutex);
1610 				goto retry;
1611 			}
1612 		}
1613 		/* take snap_rwsem after session mutex */
1614 		if (!took_snap_rwsem) {
1615 			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1616 				dout("inverting snap/inode locks on %p\n",
1617 				     inode);
1618 				spin_unlock(&inode->i_lock);
1619 				down_read(&mdsc->snap_rwsem);
1620 				took_snap_rwsem = 1;
1621 				goto retry;
1622 			}
1623 			took_snap_rwsem = 1;
1624 		}
1625 
1626 		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
1627 			flushing = __mark_caps_flushing(inode, session);
1628 
1629 		mds = cap->mds;  /* remember mds, so we don't repeat */
1630 		sent++;
1631 
1632 		/* __send_cap drops i_lock */
1633 		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
1634 				      retain, flushing, NULL);
1635 		goto retry; /* retake i_lock and restart our cap scan. */
1636 	}
1637 
1638 	/*
1639 	 * Reschedule delayed caps release if we delayed anything,
1640 	 * otherwise cancel.
1641 	 */
1642 	if (delayed && is_delayed)
1643 		force_requeue = 1;   /* __send_cap delayed release; requeue */
1644 	if (!delayed && !is_delayed)
1645 		__cap_delay_cancel(mdsc, ci);
1646 	else if (!is_delayed || force_requeue)
1647 		__cap_delay_requeue(mdsc, ci);
1648 
1649 	spin_unlock(&inode->i_lock);
1650 
1651 	if (queue_invalidate)
1652 		ceph_queue_invalidate(inode);
1653 
1654 	if (session)
1655 		mutex_unlock(&session->s_mutex);
1656 	if (took_snap_rwsem)
1657 		up_read(&mdsc->snap_rwsem);
1658 }
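
/*
 * Hypothetical call sites illustrating the flags above: dropping the
 * last reference to caps we no longer want might use
 *
 *	ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NODELAY, NULL);
 *
 * while periodic delayed work calls ceph_check_caps(ci, 0, NULL) and
 * lets the i_hold_caps_{min,max} timeouts decide what to release.
 */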
1659 
1660 /*
1661  * Try to flush dirty caps back to the auth mds.
1662  */
1663 static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1664 			  unsigned *flush_tid)
1665 {
1666 	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
1667 	struct ceph_inode_info *ci = ceph_inode(inode);
1668 	int unlock_session = session ? 0 : 1;
1669 	int flushing = 0;
1670 
1671 retry:
1672 	spin_lock(&inode->i_lock);
1673 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1674 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1675 		goto out;
1676 	}
1677 	if (ci->i_dirty_caps && ci->i_auth_cap) {
1678 		struct ceph_cap *cap = ci->i_auth_cap;
1679 		int used = __ceph_caps_used(ci);
1680 		int want = __ceph_caps_wanted(ci);
1681 		int delayed;
1682 
1683 		if (!session) {
1684 			spin_unlock(&inode->i_lock);
1685 			session = cap->session;
1686 			mutex_lock(&session->s_mutex);
1687 			goto retry;
1688 		}
1689 		BUG_ON(session != cap->session);
1690 		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1691 			goto out;
1692 
1693 		flushing = __mark_caps_flushing(inode, session);
1694 
1695 		/* __send_cap drops i_lock */
1696 		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1697 				     cap->issued | cap->implemented, flushing,
1698 				     flush_tid);
1699 		if (!delayed)
1700 			goto out_unlocked;
1701 
1702 		spin_lock(&inode->i_lock);
1703 		__cap_delay_requeue(mdsc, ci);
1704 	}
1705 out:
1706 	spin_unlock(&inode->i_lock);
1707 out_unlocked:
1708 	if (session && unlock_session)
1709 		mutex_unlock(&session->s_mutex);
1710 	return flushing;
1711 }
1712 
1713 /*
1714  * Return true if we've flushed caps through the given flush_tid.
1715  */
1716 static int caps_are_flushed(struct inode *inode, unsigned tid)
1717 {
1718 	struct ceph_inode_info *ci = ceph_inode(inode);
1719 	int dirty, i, ret = 1;
1720 
1721 	spin_lock(&inode->i_lock);
1722 	dirty = __ceph_caps_dirty(ci);
1723 	for (i = 0; i < CEPH_CAP_BITS; i++)
1724 		if ((ci->i_flushing_caps & (1 << i)) &&
1725 		    ci->i_cap_flush_tid[i] <= tid) {
1726 			/* still flushing this bit */
1727 			ret = 0;
1728 			break;
1729 		}
1730 	spin_unlock(&inode->i_lock);
1731 	return ret;
1732 }
1733 
1734 /*
1735  * Wait on any unsafe replies for the given inode.  First wait on the
1736  * newest request, and make that the upper bound.  Then, if there are
1737  * more requests, keep waiting on the oldest as long as it is still older
1738  * than that upper bound.
1739  */
1740 static void sync_write_wait(struct inode *inode)
1741 {
1742 	struct ceph_inode_info *ci = ceph_inode(inode);
1743 	struct list_head *head = &ci->i_unsafe_writes;
1744 	struct ceph_osd_request *req;
1745 	u64 last_tid;
1746 
1747 	spin_lock(&ci->i_unsafe_lock);
1748 	if (list_empty(head))
1749 		goto out;
1750 
1751 	/* set upper bound as _last_ entry in chain */
1752 	req = list_entry(head->prev, struct ceph_osd_request,
1753 			 r_unsafe_item);
1754 	last_tid = req->r_tid;
1755 
1756 	do {
1757 		ceph_osdc_get_request(req);
1758 		spin_unlock(&ci->i_unsafe_lock);
1759 		dout("sync_write_wait on tid %llu (until %llu)\n",
1760 		     req->r_tid, last_tid);
1761 		wait_for_completion(&req->r_safe_completion);
1762 		spin_lock(&ci->i_unsafe_lock);
1763 		ceph_osdc_put_request(req);
1764 
1765 		/*
1766 		 * from here on look at first entry in chain, since we
1767 		 * only want to wait for anything older than last_tid
1768 		 */
1769 		if (list_empty(head))
1770 			break;
1771 		req = list_entry(head->next, struct ceph_osd_request,
1772 				 r_unsafe_item);
1773 	} while (req->r_tid < last_tid);
1774 out:
1775 	spin_unlock(&ci->i_unsafe_lock);
1776 }
1777 
1778 int ceph_fsync(struct file *file, struct dentry *dentry, int datasync)
1779 {
1780 	struct inode *inode = dentry->d_inode;
1781 	struct ceph_inode_info *ci = ceph_inode(inode);
1782 	unsigned flush_tid;
1783 	int ret;
1784 	int dirty;
1785 
1786 	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1787 	sync_write_wait(inode);
1788 
1789 	ret = filemap_write_and_wait(inode->i_mapping);
1790 	if (ret < 0)
1791 		return ret;
1792 
1793 	dirty = try_flush_caps(inode, NULL, &flush_tid);
1794 	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1795 
1796 	/*
1797 	 * only wait on non-file metadata writeback (the mds
1798 	 * can recover size and mtime, so we don't need to
1799 	 * wait for that)
1800 	 */
1801 	if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1802 		dout("fsync waiting for flush_tid %u\n", flush_tid);
1803 		ret = wait_event_interruptible(ci->i_cap_wq,
1804 				       caps_are_flushed(inode, flush_tid));
1805 	}
1806 
1807 	dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
1808 	return ret;
1809 }
1810 
1811 /*
1812  * Flush any dirty caps back to the mds.  If we aren't asked to wait,
1813  * queue inode for flush but don't do so immediately, because we can
1814  * get by with fewer MDS messages if we wait for data writeback to
1815  * complete first.
1816  */
1817 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1818 {
1819 	struct ceph_inode_info *ci = ceph_inode(inode);
1820 	unsigned flush_tid;
1821 	int err = 0;
1822 	int dirty;
1823 	int wait = wbc->sync_mode == WB_SYNC_ALL;
1824 
1825 	dout("write_inode %p wait=%d\n", inode, wait);
1826 	if (wait) {
1827 		dirty = try_flush_caps(inode, NULL, &flush_tid);
1828 		if (dirty)
1829 			err = wait_event_interruptible(ci->i_cap_wq,
1830 				       caps_are_flushed(inode, flush_tid));
1831 	} else {
1832 		struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
1833 
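		/*
		 * Queue the inode at the front of the delay list so a
		 * later delayed-work pass flushes it, once data
		 * writeback has had a chance to complete.
		 */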
1834 		spin_lock(&inode->i_lock);
1835 		if (__ceph_caps_dirty(ci))
1836 			__cap_delay_requeue_front(mdsc, ci);
1837 		spin_unlock(&inode->i_lock);
1838 	}
1839 	return err;
1840 }
1841 
1842 /*
1843  * After a recovering MDS goes active, we need to resend any caps
1844  * we were flushing.
1845  *
1846  * Caller holds session->s_mutex.
1847  */
1848 static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1849 				   struct ceph_mds_session *session)
1850 {
1851 	struct ceph_cap_snap *capsnap;
1852 
1853 	dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1854 	list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1855 			    flushing_item) {
1856 		struct ceph_inode_info *ci = capsnap->ci;
1857 		struct inode *inode = &ci->vfs_inode;
1858 		struct ceph_cap *cap;
1859 
1860 		spin_lock(&inode->i_lock);
1861 		cap = ci->i_auth_cap;
1862 		if (cap && cap->session == session) {
			dout("kick_flushing_capsnaps %p cap %p capsnap %p\n",
			     inode, cap, capsnap);
1865 			__ceph_flush_snaps(ci, &session);
1866 		} else {
1867 			pr_err("%p auth cap %p not mds%d ???\n", inode,
1868 			       cap, session->s_mds);
1869 		}
1870 		spin_unlock(&inode->i_lock);
1871 	}
1872 }
1873 
1874 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1875 			     struct ceph_mds_session *session)
1876 {
1877 	struct ceph_inode_info *ci;
1878 
1879 	kick_flushing_capsnaps(mdsc, session);
1880 
1881 	dout("kick_flushing_caps mds%d\n", session->s_mds);
1882 	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
1883 		struct inode *inode = &ci->vfs_inode;
1884 		struct ceph_cap *cap;
1885 		int delayed = 0;
1886 
1887 		spin_lock(&inode->i_lock);
1888 		cap = ci->i_auth_cap;
1889 		if (cap && cap->session == session) {
1890 			dout("kick_flushing_caps %p cap %p %s\n", inode,
1891 			     cap, ceph_cap_string(ci->i_flushing_caps));
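			/* __send_cap drops i_lock */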
1892 			delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1893 					     __ceph_caps_used(ci),
1894 					     __ceph_caps_wanted(ci),
1895 					     cap->issued | cap->implemented,
1896 					     ci->i_flushing_caps, NULL);
1897 			if (delayed) {
1898 				spin_lock(&inode->i_lock);
1899 				__cap_delay_requeue(mdsc, ci);
1900 				spin_unlock(&inode->i_lock);
1901 			}
1902 		} else {
1903 			pr_err("%p auth cap %p not mds%d ???\n", inode,
1904 			       cap, session->s_mds);
1905 			spin_unlock(&inode->i_lock);
1906 		}
1907 	}
1908 }
1909 
1910 
1911 /*
1912  * Take references to capabilities we hold, so that we don't release
1913  * them to the MDS prematurely.
1914  *
1915  * Protected by i_lock.
1916  */
1917 static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1918 {
1919 	if (got & CEPH_CAP_PIN)
1920 		ci->i_pin_ref++;
1921 	if (got & CEPH_CAP_FILE_RD)
1922 		ci->i_rd_ref++;
1923 	if (got & CEPH_CAP_FILE_CACHE)
1924 		ci->i_rdcache_ref++;
1925 	if (got & CEPH_CAP_FILE_WR)
1926 		ci->i_wr_ref++;
1927 	if (got & CEPH_CAP_FILE_BUFFER) {
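		/*
		 * The first buffered-write ref pins the inode; the
		 * matching iput happens when the last wrbuffer ref is
		 * dropped (see ceph_put_cap_refs and
		 * ceph_put_wrbuffer_cap_refs).
		 */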
1928 		if (ci->i_wrbuffer_ref == 0)
1929 			igrab(&ci->vfs_inode);
1930 		ci->i_wrbuffer_ref++;
1931 		dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
1932 		     &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
1933 	}
1934 }
1935 
1936 /*
1937  * Try to grab cap references.  Specify those refs we @want, and the
 * minimal set we @need.  Also pass in the offset we are writing up
 * to (@endoff, when applicable), which is checked against max_size.
1940  * Note that caller is responsible for ensuring max_size increases are
1941  * requested from the MDS.
1942  */
1943 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
1944 			    int *got, loff_t endoff, int *check_max, int *err)
1945 {
1946 	struct inode *inode = &ci->vfs_inode;
1947 	int ret = 0;
1948 	int have, implemented;
1949 	int file_wanted;
1950 
1951 	dout("get_cap_refs %p need %s want %s\n", inode,
1952 	     ceph_cap_string(need), ceph_cap_string(want));
1953 	spin_lock(&inode->i_lock);
1954 
1955 	/* make sure file is actually open */
1956 	file_wanted = __ceph_caps_file_wanted(ci);
1957 	if ((file_wanted & need) == 0) {
1958 		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
1959 		     ceph_cap_string(need), ceph_cap_string(file_wanted));
1960 		*err = -EBADF;
1961 		ret = 1;
1962 		goto out;
1963 	}
1964 
1965 	if (need & CEPH_CAP_FILE_WR) {
1966 		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
1967 			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
1968 			     inode, endoff, ci->i_max_size);
1969 			if (endoff > ci->i_wanted_max_size) {
1970 				*check_max = 1;
1971 				ret = 1;
1972 			}
1973 			goto out;
1974 		}
1975 		/*
1976 		 * If a sync write is in progress, we must wait, so that we
1977 		 * can get a final snapshot value for size+mtime.
1978 		 */
1979 		if (__ceph_have_pending_cap_snap(ci)) {
1980 			dout("get_cap_refs %p cap_snap_pending\n", inode);
1981 			goto out;
1982 		}
1983 	}
1984 	have = __ceph_caps_issued(ci, &implemented);
1985 
1986 	/*
1987 	 * disallow writes while a truncate is pending
1988 	 */
1989 	if (ci->i_truncate_pending)
1990 		have &= ~CEPH_CAP_FILE_WR;
1991 
1992 	if ((have & need) == need) {
1993 		/*
1994 		 * Look at (implemented & ~have & not) so that we keep waiting
1995 		 * on transition from wanted -> needed caps.  This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write
		 * starting before a prior buffered writeback completes.
1998 		 */
1999 		int not = want & ~(have & need);
2000 		int revoking = implemented & ~have;
2001 		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2002 		     inode, ceph_cap_string(have), ceph_cap_string(not),
2003 		     ceph_cap_string(revoking));
2004 		if ((revoking & not) == 0) {
2005 			*got = need | (have & want);
2006 			__take_cap_refs(ci, *got);
2007 			ret = 1;
2008 		}
2009 	} else {
2010 		dout("get_cap_refs %p have %s needed %s\n", inode,
2011 		     ceph_cap_string(have), ceph_cap_string(need));
2012 	}
2013 out:
2014 	spin_unlock(&inode->i_lock);
2015 	dout("get_cap_refs %p ret %d got %s\n", inode,
2016 	     ret, ceph_cap_string(*got));
2017 	return ret;
2018 }
2019 
2020 /*
2021  * Check the offset we are writing up to against our current
2022  * max_size.  If necessary, tell the MDS we want to write to
2023  * a larger offset.
2024  */
2025 static void check_max_size(struct inode *inode, loff_t endoff)
2026 {
2027 	struct ceph_inode_info *ci = ceph_inode(inode);
2028 	int check = 0;
2029 
2030 	/* do we need to explicitly request a larger max_size? */
2031 	spin_lock(&inode->i_lock);
2032 	if ((endoff >= ci->i_max_size ||
2033 	     endoff > (inode->i_size << 1)) &&
2034 	    endoff > ci->i_wanted_max_size) {
2035 		dout("write %p at large endoff %llu, req max_size\n",
2036 		     inode, endoff);
2037 		ci->i_wanted_max_size = endoff;
2038 		check = 1;
2039 	}
2040 	spin_unlock(&inode->i_lock);
2041 	if (check)
2042 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2043 }
2044 
2045 /*
2046  * Wait for caps, and take cap references.  If we can't get a WR cap
2047  * due to a small max_size, make sure we check_max_size (and possibly
2048  * ask the mds) so we don't get hung up indefinitely.
2049  */
2050 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2051 		  loff_t endoff)
2052 {
2053 	int check_max, ret, err;
2054 
2055 retry:
2056 	if (endoff > 0)
2057 		check_max_size(&ci->vfs_inode, endoff);
2058 	check_max = 0;
2059 	err = 0;
2060 	ret = wait_event_interruptible(ci->i_cap_wq,
2061 				       try_get_cap_refs(ci, need, want,
2062 							got, endoff,
2063 							&check_max, &err));
2064 	if (err)
2065 		ret = err;
2066 	if (check_max)
2067 		goto retry;
2068 	return ret;
2069 }
2070 
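/*
 * A minimal sketch (not an actual caller in this file) of how the
 * file read/write paths pair these helpers: take cap refs before
 * doing I/O, and drop them when the I/O is done, e.g.
 *
 *	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
 *			    &got, endoff);
 *	if (err == 0) {
 *		...do the buffered write...
 *		ceph_put_cap_refs(ci, got);
 *	}
 */
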
2071 /*
 * Take cap refs.  The caller must already know we hold at least one
 * ref on the caps in question, or this is not safe.
2074  */
2075 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2076 {
2077 	spin_lock(&ci->vfs_inode.i_lock);
2078 	__take_cap_refs(ci, caps);
2079 	spin_unlock(&ci->vfs_inode.i_lock);
2080 }
2081 
2082 /*
2083  * Release cap refs.
2084  *
2085  * If we released the last ref on any given cap, call ceph_check_caps
2086  * to release (or schedule a release).
2087  *
2088  * If we are releasing a WR cap (from a sync write), finalize any affected
2089  * cap_snap, and wake up any waiters.
2090  */
2091 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2092 {
2093 	struct inode *inode = &ci->vfs_inode;
2094 	int last = 0, put = 0, flushsnaps = 0, wake = 0;
2095 	struct ceph_cap_snap *capsnap;
2096 
2097 	spin_lock(&inode->i_lock);
2098 	if (had & CEPH_CAP_PIN)
2099 		--ci->i_pin_ref;
2100 	if (had & CEPH_CAP_FILE_RD)
2101 		if (--ci->i_rd_ref == 0)
2102 			last++;
2103 	if (had & CEPH_CAP_FILE_CACHE)
2104 		if (--ci->i_rdcache_ref == 0)
2105 			last++;
2106 	if (had & CEPH_CAP_FILE_BUFFER) {
2107 		if (--ci->i_wrbuffer_ref == 0) {
2108 			last++;
2109 			put++;
2110 		}
2111 		dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
2112 		     inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
2113 	}
2114 	if (had & CEPH_CAP_FILE_WR)
2115 		if (--ci->i_wr_ref == 0) {
2116 			last++;
2117 			if (!list_empty(&ci->i_cap_snaps)) {
2118 				capsnap = list_first_entry(&ci->i_cap_snaps,
2119 						     struct ceph_cap_snap,
2120 						     ci_item);
2121 				if (capsnap->writing) {
2122 					capsnap->writing = 0;
2123 					flushsnaps =
2124 						__ceph_finish_cap_snap(ci,
2125 								       capsnap);
2126 					wake = 1;
2127 				}
2128 			}
2129 		}
2130 	spin_unlock(&inode->i_lock);
2131 
2132 	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2133 	     last ? " last" : "", put ? " put" : "");
2134 
2135 	if (last && !flushsnaps)
2136 		ceph_check_caps(ci, 0, NULL);
2137 	else if (flushsnaps)
2138 		ceph_flush_snaps(ci);
2139 	if (wake)
2140 		wake_up(&ci->i_cap_wq);
2141 	if (put)
2142 		iput(inode);
2143 }
2144 
2145 /*
2146  * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2147  * context.  Adjust per-snap dirty page accounting as appropriate.
2148  * Once all dirty data for a cap_snap is flushed, flush snapped file
2149  * metadata back to the MDS.  If we dropped the last ref, call
2150  * ceph_check_caps.
2151  */
2152 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2153 				struct ceph_snap_context *snapc)
2154 {
2155 	struct inode *inode = &ci->vfs_inode;
2156 	int last = 0;
2157 	int complete_capsnap = 0;
2158 	int drop_capsnap = 0;
2159 	int found = 0;
2160 	struct ceph_cap_snap *capsnap = NULL;
2161 
2162 	spin_lock(&inode->i_lock);
2163 	ci->i_wrbuffer_ref -= nr;
2164 	last = !ci->i_wrbuffer_ref;
2165 
2166 	if (ci->i_head_snapc == snapc) {
2167 		ci->i_wrbuffer_ref_head -= nr;
2168 		if (!ci->i_wrbuffer_ref_head) {
2169 			ceph_put_snap_context(ci->i_head_snapc);
2170 			ci->i_head_snapc = NULL;
2171 		}
2172 		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2173 		     inode,
2174 		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2175 		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2176 		     last ? " LAST" : "");
2177 	} else {
2178 		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2179 			if (capsnap->context == snapc) {
2180 				found = 1;
2181 				break;
2182 			}
2183 		}
2184 		BUG_ON(!found);
2185 		capsnap->dirty_pages -= nr;
2186 		if (capsnap->dirty_pages == 0) {
2187 			complete_capsnap = 1;
2188 			if (capsnap->dirty == 0)
2189 				/* cap writeback completed before we created
2190 				 * the cap_snap; no FLUSHSNAP is needed */
2191 				drop_capsnap = 1;
2192 		}
		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
		     "snap %lld %d/%d -> %d/%d %s%s%s\n",
2195 		     inode, capsnap, capsnap->context->seq,
2196 		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2197 		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
2198 		     last ? " (wrbuffer last)" : "",
2199 		     complete_capsnap ? " (complete capsnap)" : "",
2200 		     drop_capsnap ? " (drop capsnap)" : "");
2201 		if (drop_capsnap) {
2202 			ceph_put_snap_context(capsnap->context);
2203 			list_del(&capsnap->ci_item);
2204 			list_del(&capsnap->flushing_item);
2205 			ceph_put_cap_snap(capsnap);
2206 		}
2207 	}
2208 
2209 	spin_unlock(&inode->i_lock);
2210 
2211 	if (last) {
2212 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2213 		iput(inode);
2214 	} else if (complete_capsnap) {
2215 		ceph_flush_snaps(ci);
2216 		wake_up(&ci->i_cap_wq);
2217 	}
2218 	if (drop_capsnap)
2219 		iput(inode);
2220 }
2221 
2222 /*
2223  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
2224  * actually be a revocation if it specifies a smaller cap set.)
2225  *
2226  * caller holds s_mutex and i_lock, we drop both.
2227  *
 * Before returning, we may call ceph_check_caps: on the auth cap only
 * (to initiate writeback of dirty caps), or on all caps (to ack the
 * revocation).
2232  */
2233 static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2234 			     struct ceph_mds_session *session,
2235 			     struct ceph_cap *cap,
2236 			     struct ceph_buffer *xattr_buf)
2237 	__releases(inode->i_lock)
2238 	__releases(session->s_mutex)
2239 {
2240 	struct ceph_inode_info *ci = ceph_inode(inode);
2241 	int mds = session->s_mds;
2242 	int seq = le32_to_cpu(grant->seq);
2243 	int newcaps = le32_to_cpu(grant->caps);
2244 	int issued, implemented, used, wanted, dirty;
2245 	u64 size = le64_to_cpu(grant->size);
2246 	u64 max_size = le64_to_cpu(grant->max_size);
2247 	struct timespec mtime, atime, ctime;
2248 	int check_caps = 0;
2249 	int wake = 0;
2250 	int writeback = 0;
2251 	int revoked_rdcache = 0;
2252 	int queue_invalidate = 0;
2253 
2254 	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2255 	     inode, cap, mds, seq, ceph_cap_string(newcaps));
2256 	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2257 		inode->i_size);
2258 
2259 	/*
2260 	 * If CACHE is being revoked, and we have no dirty buffers,
2261 	 * try to invalidate (once).  (If there are dirty buffers, we
2262 	 * will invalidate _after_ writeback.)
2263 	 */
2264 	if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2265 	    !ci->i_wrbuffer_ref) {
2266 		if (try_nonblocking_invalidate(inode) == 0) {
2267 			revoked_rdcache = 1;
2268 		} else {
			/* there were locked pages... invalidate later
			   in a separate thread. */
2271 			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2272 				queue_invalidate = 1;
2273 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
2274 			}
2275 		}
2276 	}
2277 
2278 	/* side effects now are allowed */
2279 
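	/*
	 * Treat a cap as "issued" if it is implemented or dirty as
	 * well, so that we don't clobber inode fields (mode, size,
	 * mtime, xattrs) that we may still be flushing to the MDS.
	 */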
2280 	issued = __ceph_caps_issued(ci, &implemented);
2281 	issued |= implemented | __ceph_caps_dirty(ci);
2282 
2283 	cap->cap_gen = session->s_cap_gen;
2284 
2285 	__check_cap_issue(ci, cap, newcaps);
2286 
2287 	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2288 		inode->i_mode = le32_to_cpu(grant->mode);
2289 		inode->i_uid = le32_to_cpu(grant->uid);
2290 		inode->i_gid = le32_to_cpu(grant->gid);
2291 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2292 		     inode->i_uid, inode->i_gid);
2293 	}
2294 
2295 	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
2296 		inode->i_nlink = le32_to_cpu(grant->nlink);
2297 
2298 	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2299 		int len = le32_to_cpu(grant->xattr_len);
2300 		u64 version = le64_to_cpu(grant->xattr_version);
2301 
2302 		if (version > ci->i_xattrs.version) {
2303 			dout(" got new xattrs v%llu on %p len %d\n",
2304 			     version, inode, len);
2305 			if (ci->i_xattrs.blob)
2306 				ceph_buffer_put(ci->i_xattrs.blob);
2307 			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2308 			ci->i_xattrs.version = version;
2309 		}
2310 	}
2311 
2312 	/* size/ctime/mtime/atime? */
2313 	ceph_fill_file_size(inode, issued,
2314 			    le32_to_cpu(grant->truncate_seq),
2315 			    le64_to_cpu(grant->truncate_size), size);
2316 	ceph_decode_timespec(&mtime, &grant->mtime);
2317 	ceph_decode_timespec(&atime, &grant->atime);
2318 	ceph_decode_timespec(&ctime, &grant->ctime);
2319 	ceph_fill_file_time(inode, issued,
2320 			    le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2321 			    &atime);
2322 
2323 	/* max size increase? */
2324 	if (max_size != ci->i_max_size) {
2325 		dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2326 		ci->i_max_size = max_size;
2327 		if (max_size >= ci->i_wanted_max_size) {
2328 			ci->i_wanted_max_size = 0;  /* reset */
2329 			ci->i_requested_max_size = 0;
2330 		}
2331 		wake = 1;
2332 	}
2333 
2334 	/* check cap bits */
2335 	wanted = __ceph_caps_wanted(ci);
2336 	used = __ceph_caps_used(ci);
2337 	dirty = __ceph_caps_dirty(ci);
2338 	dout(" my wanted = %s, used = %s, dirty %s\n",
2339 	     ceph_cap_string(wanted),
2340 	     ceph_cap_string(used),
2341 	     ceph_cap_string(dirty));
2342 	if (wanted != le32_to_cpu(grant->wanted)) {
2343 		dout("mds wanted %s -> %s\n",
2344 		     ceph_cap_string(le32_to_cpu(grant->wanted)),
2345 		     ceph_cap_string(wanted));
2346 		grant->wanted = cpu_to_le32(wanted);
2347 	}
2348 
2349 	cap->seq = seq;
2350 
2351 	/* file layout may have changed */
2352 	ci->i_layout = grant->layout;
2353 
2354 	/* revocation, grant, or no-op? */
2355 	if (cap->issued & ~newcaps) {
2356 		dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
2357 		     ceph_cap_string(newcaps));
2358 		if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
2359 			writeback = 1; /* will delay ack */
2360 		else if (dirty & ~newcaps)
2361 			check_caps = 1;  /* initiate writeback in check_caps */
2362 		else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
2363 			   revoked_rdcache)
2364 			check_caps = 2;     /* send revoke ack in check_caps */
2365 		cap->issued = newcaps;
2366 		cap->implemented |= newcaps;
2367 	} else if (cap->issued == newcaps) {
2368 		dout("caps unchanged: %s -> %s\n",
2369 		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2370 	} else {
2371 		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2372 		     ceph_cap_string(newcaps));
2373 		cap->issued = newcaps;
2374 		cap->implemented |= newcaps; /* add bits only, to
2375 					      * avoid stepping on a
2376 					      * pending revocation */
2377 		wake = 1;
2378 	}
2379 	BUG_ON(cap->issued & ~cap->implemented);
2380 
2381 	spin_unlock(&inode->i_lock);
2382 	if (writeback)
2383 		/*
2384 		 * queue inode for writeback: we can't actually call
2385 		 * filemap_write_and_wait, etc. from message handler
2386 		 * context.
2387 		 */
2388 		ceph_queue_writeback(inode);
2389 	if (queue_invalidate)
2390 		ceph_queue_invalidate(inode);
2391 	if (wake)
2392 		wake_up(&ci->i_cap_wq);
2393 
2394 	if (check_caps == 1)
2395 		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2396 				session);
2397 	else if (check_caps == 2)
2398 		ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2399 	else
2400 		mutex_unlock(&session->s_mutex);
2401 }
2402 
2403 /*
2404  * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2405  * MDS has been safely committed.
2406  */
2407 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2408 				 struct ceph_mds_caps *m,
2409 				 struct ceph_mds_session *session,
2410 				 struct ceph_cap *cap)
2411 	__releases(inode->i_lock)
2412 {
2413 	struct ceph_inode_info *ci = ceph_inode(inode);
2414 	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
2415 	unsigned seq = le32_to_cpu(m->seq);
2416 	int dirty = le32_to_cpu(m->dirty);
2417 	int cleaned = 0;
2418 	int drop = 0;
2419 	int i;
2420 
2421 	for (i = 0; i < CEPH_CAP_BITS; i++)
2422 		if ((dirty & (1 << i)) &&
2423 		    flush_tid == ci->i_cap_flush_tid[i])
2424 			cleaned |= 1 << i;
2425 
2426 	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2427 	     " flushing %s -> %s\n",
2428 	     inode, session->s_mds, seq, ceph_cap_string(dirty),
2429 	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2430 	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2431 
2432 	if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2433 		goto out;
2434 
2435 	ci->i_flushing_caps &= ~cleaned;
2436 
2437 	spin_lock(&mdsc->cap_dirty_lock);
2438 	if (ci->i_flushing_caps == 0) {
2439 		list_del_init(&ci->i_flushing_item);
2440 		if (!list_empty(&session->s_cap_flushing))
2441 			dout(" mds%d still flushing cap on %p\n",
2442 			     session->s_mds,
2443 			     &list_entry(session->s_cap_flushing.next,
2444 					 struct ceph_inode_info,
2445 					 i_flushing_item)->vfs_inode);
2446 		mdsc->num_cap_flushing--;
2447 		wake_up(&mdsc->cap_flushing_wq);
2448 		dout(" inode %p now !flushing\n", inode);
2449 
2450 		if (ci->i_dirty_caps == 0) {
2451 			dout(" inode %p now clean\n", inode);
2452 			BUG_ON(!list_empty(&ci->i_dirty_item));
2453 			drop = 1;
2454 		} else {
2455 			BUG_ON(list_empty(&ci->i_dirty_item));
2456 		}
2457 	}
2458 	spin_unlock(&mdsc->cap_dirty_lock);
2459 	wake_up(&ci->i_cap_wq);
2460 
2461 out:
2462 	spin_unlock(&inode->i_lock);
2463 	if (drop)
2464 		iput(inode);
2465 }
2466 
2467 /*
2468  * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
2469  * throw away our cap_snap.
2470  *
 * Caller holds s_mutex.
2472  */
2473 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2474 				     struct ceph_mds_caps *m,
2475 				     struct ceph_mds_session *session)
2476 {
2477 	struct ceph_inode_info *ci = ceph_inode(inode);
2478 	u64 follows = le64_to_cpu(m->snap_follows);
2479 	struct ceph_cap_snap *capsnap;
2480 	int drop = 0;
2481 
2482 	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2483 	     inode, ci, session->s_mds, follows);
2484 
2485 	spin_lock(&inode->i_lock);
2486 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2487 		if (capsnap->follows == follows) {
2488 			if (capsnap->flush_tid != flush_tid) {
2489 				dout(" cap_snap %p follows %lld tid %lld !="
2490 				     " %lld\n", capsnap, follows,
2491 				     flush_tid, capsnap->flush_tid);
2492 				break;
2493 			}
2494 			WARN_ON(capsnap->dirty_pages || capsnap->writing);
2495 			dout(" removing %p cap_snap %p follows %lld\n",
2496 			     inode, capsnap, follows);
2497 			ceph_put_snap_context(capsnap->context);
2498 			list_del(&capsnap->ci_item);
2499 			list_del(&capsnap->flushing_item);
2500 			ceph_put_cap_snap(capsnap);
2501 			drop = 1;
2502 			break;
2503 		} else {
2504 			dout(" skipping cap_snap %p follows %lld\n",
2505 			     capsnap, capsnap->follows);
2506 		}
2507 	}
2508 	spin_unlock(&inode->i_lock);
2509 	if (drop)
2510 		iput(inode);
2511 }
2512 
2513 /*
2514  * Handle TRUNC from MDS, indicating file truncation.
2515  *
 * caller holds s_mutex.
2517  */
2518 static void handle_cap_trunc(struct inode *inode,
2519 			     struct ceph_mds_caps *trunc,
2520 			     struct ceph_mds_session *session)
2521 	__releases(inode->i_lock)
2522 {
2523 	struct ceph_inode_info *ci = ceph_inode(inode);
2524 	int mds = session->s_mds;
2525 	int seq = le32_to_cpu(trunc->seq);
2526 	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2527 	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2528 	u64 size = le64_to_cpu(trunc->size);
2529 	int implemented = 0;
2530 	int dirty = __ceph_caps_dirty(ci);
	int issued = __ceph_caps_issued(ci, &implemented);
2532 	int queue_trunc = 0;
2533 
2534 	issued |= implemented | dirty;
2535 
2536 	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2537 	     inode, mds, seq, truncate_size, truncate_seq);
2538 	queue_trunc = ceph_fill_file_size(inode, issued,
2539 					  truncate_seq, truncate_size, size);
2540 	spin_unlock(&inode->i_lock);
2541 
2542 	if (queue_trunc)
2543 		ceph_queue_vmtruncate(inode);
2544 }
2545 
2546 /*
2547  * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
2548  * different one.  If we are the most recent migration we've seen (as
2549  * indicated by mseq), make note of the migrating cap bits for the
2550  * duration (until we see the corresponding IMPORT).
2551  *
2552  * caller holds s_mutex
2553  */
2554 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2555 			      struct ceph_mds_session *session)
2556 {
2557 	struct ceph_inode_info *ci = ceph_inode(inode);
2558 	int mds = session->s_mds;
2559 	unsigned mseq = le32_to_cpu(ex->migrate_seq);
2560 	struct ceph_cap *cap = NULL, *t;
2561 	struct rb_node *p;
2562 	int remember = 1;
2563 
2564 	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2565 	     inode, ci, mds, mseq);
2566 
2567 	spin_lock(&inode->i_lock);
2568 
2569 	/* make sure we haven't seen a higher mseq */
2570 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2571 		t = rb_entry(p, struct ceph_cap, ci_node);
2572 		if (ceph_seq_cmp(t->mseq, mseq) > 0) {
2573 			dout(" higher mseq on cap from mds%d\n",
2574 			     t->session->s_mds);
2575 			remember = 0;
2576 		}
2577 		if (t->session->s_mds == mds)
2578 			cap = t;
2579 	}
2580 
2581 	if (cap) {
2582 		if (remember) {
2583 			/* make note */
2584 			ci->i_cap_exporting_mds = mds;
2585 			ci->i_cap_exporting_mseq = mseq;
2586 			ci->i_cap_exporting_issued = cap->issued;
2587 		}
2588 		__ceph_remove_cap(cap);
2589 	}
2590 	/* else, we already released it */
2591 
2592 	spin_unlock(&inode->i_lock);
2593 }
2594 
2595 /*
2596  * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
2597  * clean them up.
2598  *
2599  * caller holds s_mutex.
2600  */
2601 static void handle_cap_import(struct ceph_mds_client *mdsc,
2602 			      struct inode *inode, struct ceph_mds_caps *im,
2603 			      struct ceph_mds_session *session,
2604 			      void *snaptrace, int snaptrace_len)
2605 {
2606 	struct ceph_inode_info *ci = ceph_inode(inode);
2607 	int mds = session->s_mds;
2608 	unsigned issued = le32_to_cpu(im->caps);
2609 	unsigned wanted = le32_to_cpu(im->wanted);
2610 	unsigned seq = le32_to_cpu(im->seq);
2611 	unsigned mseq = le32_to_cpu(im->migrate_seq);
2612 	u64 realmino = le64_to_cpu(im->realm);
2613 	u64 cap_id = le64_to_cpu(im->cap_id);
2614 
2615 	if (ci->i_cap_exporting_mds >= 0 &&
2616 	    ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
2617 		dout("handle_cap_import inode %p ci %p mds%d mseq %d"
2618 		     " - cleared exporting from mds%d\n",
2619 		     inode, ci, mds, mseq,
2620 		     ci->i_cap_exporting_mds);
2621 		ci->i_cap_exporting_issued = 0;
2622 		ci->i_cap_exporting_mseq = 0;
2623 		ci->i_cap_exporting_mds = -1;
2624 	} else {
2625 		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2626 		     inode, ci, mds, mseq);
2627 	}
2628 
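	/*
	 * Update the snap realm trace with snap_rwsem held for write,
	 * then downgrade: ceph_add_cap and try_flush_caps only need
	 * read access.
	 */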
2629 	down_write(&mdsc->snap_rwsem);
2630 	ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
2631 			       false);
2632 	downgrade_write(&mdsc->snap_rwsem);
2633 	ceph_add_cap(inode, session, cap_id, -1,
2634 		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
2635 		     NULL /* no caps context */);
2636 	try_flush_caps(inode, session, NULL);
2637 	up_read(&mdsc->snap_rwsem);
2638 }
2639 
2640 /*
2641  * Handle a caps message from the MDS.
2642  *
2643  * Identify the appropriate session, inode, and call the right handler
2644  * based on the cap op.
2645  */
2646 void ceph_handle_caps(struct ceph_mds_session *session,
2647 		      struct ceph_msg *msg)
2648 {
2649 	struct ceph_mds_client *mdsc = session->s_mdsc;
2650 	struct super_block *sb = mdsc->client->sb;
2651 	struct inode *inode;
2652 	struct ceph_cap *cap;
2653 	struct ceph_mds_caps *h;
2654 	int mds = session->s_mds;
2655 	int op;
2656 	u32 seq;
2657 	struct ceph_vino vino;
2658 	u64 cap_id;
2659 	u64 size, max_size;
2660 	u64 tid;
2661 	void *snaptrace;
2662 
2663 	dout("handle_caps from mds%d\n", mds);
2664 
2665 	/* decode */
2666 	tid = le64_to_cpu(msg->hdr.tid);
2667 	if (msg->front.iov_len < sizeof(*h))
2668 		goto bad;
2669 	h = msg->front.iov_base;
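	/* the snap trace, if any, immediately follows the fixed header */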
2670 	snaptrace = h + 1;
2671 	op = le32_to_cpu(h->op);
2672 	vino.ino = le64_to_cpu(h->ino);
2673 	vino.snap = CEPH_NOSNAP;
2674 	cap_id = le64_to_cpu(h->cap_id);
2675 	seq = le32_to_cpu(h->seq);
2676 	size = le64_to_cpu(h->size);
2677 	max_size = le64_to_cpu(h->max_size);
2678 
2679 	mutex_lock(&session->s_mutex);
2680 	session->s_seq++;
2681 	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
2682 	     (unsigned)seq);
2683 
2684 	/* lookup ino */
2685 	inode = ceph_find_inode(sb, vino);
2686 	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2687 	     vino.snap, inode);
2688 	if (!inode) {
2689 		dout(" i don't have ino %llx\n", vino.ino);
2690 		goto done;
2691 	}
2692 
2693 	/* these will work even if we don't have a cap yet */
2694 	switch (op) {
2695 	case CEPH_CAP_OP_FLUSHSNAP_ACK:
2696 		handle_cap_flushsnap_ack(inode, tid, h, session);
2697 		goto done;
2698 
2699 	case CEPH_CAP_OP_EXPORT:
2700 		handle_cap_export(inode, h, session);
2701 		goto done;
2702 
2703 	case CEPH_CAP_OP_IMPORT:
2704 		handle_cap_import(mdsc, inode, h, session,
2705 				  snaptrace, le32_to_cpu(h->snap_trace_len));
2706 		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
2707 				session);
2708 		goto done_unlocked;
2709 	}
2710 
2711 	/* the rest require a cap */
2712 	spin_lock(&inode->i_lock);
2713 	cap = __get_cap_for_mds(ceph_inode(inode), mds);
2714 	if (!cap) {
2715 		dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
2716 		     inode, ceph_ino(inode), ceph_snap(inode), mds);
2717 		spin_unlock(&inode->i_lock);
2718 		goto done;
2719 	}
2720 
2721 	/* note that each of these drops i_lock for us */
2722 	switch (op) {
2723 	case CEPH_CAP_OP_REVOKE:
2724 	case CEPH_CAP_OP_GRANT:
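		/* the xattr blob, if any, rides in msg->middle */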
2725 		handle_cap_grant(inode, h, session, cap, msg->middle);
2726 		goto done_unlocked;
2727 
2728 	case CEPH_CAP_OP_FLUSH_ACK:
2729 		handle_cap_flush_ack(inode, tid, h, session, cap);
2730 		break;
2731 
2732 	case CEPH_CAP_OP_TRUNC:
2733 		handle_cap_trunc(inode, h, session);
2734 		break;
2735 
2736 	default:
2737 		spin_unlock(&inode->i_lock);
2738 		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2739 		       ceph_cap_op_name(op));
2740 	}
2741 
2742 done:
2743 	mutex_unlock(&session->s_mutex);
2744 done_unlocked:
2745 	if (inode)
2746 		iput(inode);
2747 	return;
2748 
2749 bad:
2750 	pr_err("ceph_handle_caps: corrupt message\n");
2751 	ceph_msg_dump(msg);
2752 	return;
2753 }
2754 
2755 /*
2756  * Delayed work handler to process end of delayed cap release LRU list.
2757  */
2758 void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
2759 {
2760 	struct ceph_inode_info *ci;
2761 	int flags = CHECK_CAPS_NODELAY;
2762 
2763 	dout("check_delayed_caps\n");
2764 	while (1) {
2765 		spin_lock(&mdsc->cap_delay_lock);
2766 		if (list_empty(&mdsc->cap_delay_list))
2767 			break;
2768 		ci = list_first_entry(&mdsc->cap_delay_list,
2769 				      struct ceph_inode_info,
2770 				      i_cap_delay_list);
2771 		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
2772 		    time_before(jiffies, ci->i_hold_caps_max))
2773 			break;
2774 		list_del_init(&ci->i_cap_delay_list);
2775 		spin_unlock(&mdsc->cap_delay_lock);
2776 		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
2777 		ceph_check_caps(ci, flags, NULL);
2778 	}
2779 	spin_unlock(&mdsc->cap_delay_lock);
2780 }
2781 
2782 /*
2783  * Flush all dirty caps to the mds
2784  */
2785 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2786 {
2787 	struct ceph_inode_info *ci, *nci = NULL;
2788 	struct inode *inode, *ninode = NULL;
2789 	struct list_head *p, *n;
2790 
2791 	dout("flush_dirty_caps\n");
2792 	spin_lock(&mdsc->cap_dirty_lock);
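	/*
	 * We must drop cap_dirty_lock around ceph_check_caps, so pin
	 * the next entry's inode (igrab) and mark it NOFLUSH so a
	 * concurrent flush can't take it off the dirty list while our
	 * iteration cursor points at it.
	 */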
2793 	list_for_each_safe(p, n, &mdsc->cap_dirty) {
2794 		if (nci) {
2795 			ci = nci;
2796 			inode = ninode;
2797 			ci->i_ceph_flags &= ~CEPH_I_NOFLUSH;
2798 			dout("flush_dirty_caps inode %p (was next inode)\n",
2799 			     inode);
2800 		} else {
2801 			ci = list_entry(p, struct ceph_inode_info,
2802 					i_dirty_item);
2803 			inode = igrab(&ci->vfs_inode);
2804 			BUG_ON(!inode);
2805 			dout("flush_dirty_caps inode %p\n", inode);
2806 		}
2807 		if (n != &mdsc->cap_dirty) {
2808 			nci = list_entry(n, struct ceph_inode_info,
2809 					 i_dirty_item);
2810 			ninode = igrab(&nci->vfs_inode);
2811 			BUG_ON(!ninode);
2812 			nci->i_ceph_flags |= CEPH_I_NOFLUSH;
2813 			dout("flush_dirty_caps next inode %p, noflush\n",
2814 			     ninode);
2815 		} else {
2816 			nci = NULL;
2817 			ninode = NULL;
2818 		}
2819 		spin_unlock(&mdsc->cap_dirty_lock);
2820 		if (inode) {
2821 			ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
2822 					NULL);
2823 			iput(inode);
2824 		}
2825 		spin_lock(&mdsc->cap_dirty_lock);
2826 	}
2827 	spin_unlock(&mdsc->cap_dirty_lock);
2828 }
2829 
2830 /*
2831  * Drop open file reference.  If we were the last open file,
2832  * we may need to release capabilities to the MDS (or schedule
2833  * their delayed release).
2834  */
2835 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2836 {
2837 	struct inode *inode = &ci->vfs_inode;
2838 	int last = 0;
2839 
2840 	spin_lock(&inode->i_lock);
2841 	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
2842 	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
2843 	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
2844 	if (--ci->i_nr_by_mode[fmode] == 0)
2845 		last++;
2846 	spin_unlock(&inode->i_lock);
2847 
2848 	if (last && ci->i_vino.snap == CEPH_NOSNAP)
2849 		ceph_check_caps(ci, 0, NULL);
2850 }
2851 
2852 /*
2853  * Helpers for embedding cap and dentry lease releases into mds
2854  * requests.
2855  *
2856  * @force is used by dentry_release (below) to force inclusion of a
2857  * record for the directory inode, even when there aren't any caps to
2858  * drop.
2859  */
2860 int ceph_encode_inode_release(void **p, struct inode *inode,
2861 			      int mds, int drop, int unless, int force)
2862 {
2863 	struct ceph_inode_info *ci = ceph_inode(inode);
2864 	struct ceph_cap *cap;
2865 	struct ceph_mds_request_release *rel = *p;
2866 	int ret = 0;
2867 	int used = 0;
2868 
2869 	spin_lock(&inode->i_lock);
2870 	used = __ceph_caps_used(ci);
2871 
2872 	dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode,
2873 	     mds, ceph_cap_string(used), ceph_cap_string(drop),
2874 	     ceph_cap_string(unless));
2875 
2876 	/* only drop unused caps */
2877 	drop &= ~used;
2878 
2879 	cap = __get_cap_for_mds(ci, mds);
2880 	if (cap && __cap_is_valid(cap)) {
2881 		if (force ||
2882 		    ((cap->issued & drop) &&
2883 		     (cap->issued & unless) == 0)) {
2884 			if ((cap->issued & drop) &&
2885 			    (cap->issued & unless) == 0) {
2886 				dout("encode_inode_release %p cap %p %s -> "
2887 				     "%s\n", inode, cap,
2888 				     ceph_cap_string(cap->issued),
2889 				     ceph_cap_string(cap->issued & ~drop));
2890 				cap->issued &= ~drop;
2891 				cap->implemented &= ~drop;
2892 				if (ci->i_ceph_flags & CEPH_I_NODELAY) {
2893 					int wanted = __ceph_caps_wanted(ci);
2894 					dout("  wanted %s -> %s (act %s)\n",
2895 					     ceph_cap_string(cap->mds_wanted),
2896 					     ceph_cap_string(cap->mds_wanted &
2897 							     ~wanted),
2898 					     ceph_cap_string(wanted));
2899 					cap->mds_wanted &= wanted;
2900 				}
2901 			} else {
2902 				dout("encode_inode_release %p cap %p %s"
2903 				     " (force)\n", inode, cap,
2904 				     ceph_cap_string(cap->issued));
2905 			}
2906 
2907 			rel->ino = cpu_to_le64(ceph_ino(inode));
2908 			rel->cap_id = cpu_to_le64(cap->cap_id);
2909 			rel->seq = cpu_to_le32(cap->seq);
			rel->issue_seq = cpu_to_le32(cap->issue_seq);
2911 			rel->mseq = cpu_to_le32(cap->mseq);
2912 			rel->caps = cpu_to_le32(cap->issued);
2913 			rel->wanted = cpu_to_le32(cap->mds_wanted);
2914 			rel->dname_len = 0;
2915 			rel->dname_seq = 0;
2916 			*p += sizeof(*rel);
2917 			ret = 1;
2918 		} else {
2919 			dout("encode_inode_release %p cap %p %s\n",
2920 			     inode, cap, ceph_cap_string(cap->issued));
2921 		}
2922 	}
2923 	spin_unlock(&inode->i_lock);
2924 	return ret;
2925 }
2926 
2927 int ceph_encode_dentry_release(void **p, struct dentry *dentry,
2928 			       int mds, int drop, int unless)
2929 {
2930 	struct inode *dir = dentry->d_parent->d_inode;
2931 	struct ceph_mds_request_release *rel = *p;
2932 	struct ceph_dentry_info *di = ceph_dentry(dentry);
2933 	int force = 0;
2934 	int ret;
2935 
2936 	/*
	 * force a record for the directory caps if we have a dentry lease.
2938 	 * this is racy (can't take i_lock and d_lock together), but it
2939 	 * doesn't have to be perfect; the mds will revoke anything we don't
2940 	 * release.
2941 	 */
2942 	spin_lock(&dentry->d_lock);
2943 	if (di->lease_session && di->lease_session->s_mds == mds)
2944 		force = 1;
2945 	spin_unlock(&dentry->d_lock);
2946 
2947 	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
2948 
2949 	spin_lock(&dentry->d_lock);
2950 	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
2951 		dout("encode_dentry_release %p mds%d seq %d\n",
2952 		     dentry, mds, (int)di->lease_seq);
2953 		rel->dname_len = cpu_to_le32(dentry->d_name.len);
2954 		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
2955 		*p += dentry->d_name.len;
2956 		rel->dname_seq = cpu_to_le32(di->lease_seq);
2957 	}
2958 	spin_unlock(&dentry->d_lock);
2959 	return ret;
2960 }
2961