xref: /linux/fs/ceph/caps.c (revision b3b77c8caef1750ebeea1054e39e358550ea9f55)
1 #include "ceph_debug.h"
2 
3 #include <linux/fs.h>
4 #include <linux/kernel.h>
5 #include <linux/sched.h>
6 #include <linux/slab.h>
7 #include <linux/vmalloc.h>
8 #include <linux/wait.h>
9 #include <linux/writeback.h>
10 
11 #include "super.h"
12 #include "decode.h"
13 #include "messenger.h"
14 
15 /*
16  * Capability management
17  *
18  * The Ceph metadata servers control client access to inode metadata
19  * and file data by issuing capabilities, granting clients permission
20  * to read and/or write both inode fields and file data to OSDs
21  * (storage nodes).  Each capability consists of a set of bits
22  * indicating which operations are allowed.
23  *
24  * If the client holds a *_SHARED cap, the client has a coherent value
25  * that can be safely read from the cached inode.
26  *
27  * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
28  * client is allowed to change inode attributes (e.g., file size,
29  * mtime), note its dirty state in the ceph_cap, and asynchronously
30  * flush that metadata change to the MDS.
31  *
32  * In the event of a conflicting operation (perhaps by another
33  * client), the MDS will revoke the conflicting client capabilities.
34  *
35  * In order for a client to cache an inode, it must hold a capability
36  * issued by at least one MDS.  When inodes are released, release
37  * notifications are batched and periodically sent en masse to the MDS
38  * cluster to release server state.
39  */
40 
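/*
 * Illustrative sketch (editorial, not part of this revision): each
 * component (AUTH, LINK, XATTR, FILE) carries its own copy of the
 * generic bits (shared, excl, cache, rd, wr, buffer, lazyio), shifted
 * by a per-component amount, as the decoding in ceph_cap_string()
 * below assumes:
 *
 *	int caps = CEPH_CAP_PIN |
 *		   (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH) |
 *		   ((CEPH_CAP_GCACHE | CEPH_CAP_GRD) << CEPH_CAP_SFILE);
 */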
41 
42 /*
43  * Generate readable cap strings for debugging output.
44  */
45 #define MAX_CAP_STR 20
46 static char cap_str[MAX_CAP_STR][40];
47 static DEFINE_SPINLOCK(cap_str_lock);
48 static int last_cap_str;
49 
50 static char *gcap_string(char *s, int c)
51 {
52 	if (c & CEPH_CAP_GSHARED)
53 		*s++ = 's';
54 	if (c & CEPH_CAP_GEXCL)
55 		*s++ = 'x';
56 	if (c & CEPH_CAP_GCACHE)
57 		*s++ = 'c';
58 	if (c & CEPH_CAP_GRD)
59 		*s++ = 'r';
60 	if (c & CEPH_CAP_GWR)
61 		*s++ = 'w';
62 	if (c & CEPH_CAP_GBUFFER)
63 		*s++ = 'b';
64 	if (c & CEPH_CAP_GLAZYIO)
65 		*s++ = 'l';
66 	return s;
67 }
68 
69 const char *ceph_cap_string(int caps)
70 {
71 	int i;
72 	char *s;
73 	int c;
74 
75 	spin_lock(&cap_str_lock);
76 	i = last_cap_str++;
77 	if (last_cap_str == MAX_CAP_STR)
78 		last_cap_str = 0;
79 	spin_unlock(&cap_str_lock);
80 
81 	s = cap_str[i];
82 
83 	if (caps & CEPH_CAP_PIN)
84 		*s++ = 'p';
85 
86 	c = (caps >> CEPH_CAP_SAUTH) & 3;
87 	if (c) {
88 		*s++ = 'A';
89 		s = gcap_string(s, c);
90 	}
91 
92 	c = (caps >> CEPH_CAP_SLINK) & 3;
93 	if (c) {
94 		*s++ = 'L';
95 		s = gcap_string(s, c);
96 	}
97 
98 	c = (caps >> CEPH_CAP_SXATTR) & 3;
99 	if (c) {
100 		*s++ = 'X';
101 		s = gcap_string(s, c);
102 	}
103 
104 	c = caps >> CEPH_CAP_SFILE;
105 	if (c) {
106 		*s++ = 'F';
107 		s = gcap_string(s, c);
108 	}
109 
110 	if (s == cap_str[i])
111 		*s++ = '-';
112 	*s = 0;
113 	return cap_str[i];
114 }
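
/*
 * Example output (illustrative, derived from the encoding above): the
 * pin bit plus shared+excl auth bits renders as "pAsx"; file cache+rd
 * renders as "Fcr"; no bits at all renders as "-".
 */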
115 
116 /*
117  * Cap reservations
118  *
119  * Maintain a global pool of preallocated struct ceph_cap objects, referenced
120  * by struct ceph_cap_reservation contexts.  This ensures that we preallocate
121  * memory needed to successfully process an MDS response.  (If an MDS
122  * sends us cap information and we fail to process it, we will have
123  * problems due to the client and MDS being out of sync.)
124  *
125  * Reservations are 'owned' by a ceph_cap_reservation context.
126  */
127 static spinlock_t caps_list_lock;
128 static struct list_head caps_list;  /* unused (reserved or unreserved) */
129 static int caps_total_count;        /* total caps allocated */
130 static int caps_use_count;          /* in use */
131 static int caps_reserve_count;      /* unused, reserved */
132 static int caps_avail_count;        /* unused, unreserved */
133 static int caps_min_count;          /* keep at least this many (unreserved) */
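
/*
 * Minimal usage sketch (editorial; assumes a caller-owned context and
 * elides error handling):
 *
 *	struct ceph_cap_reservation ctx = { .count = 0 };
 *	struct ceph_cap *cap;
 *
 *	ceph_reserve_caps(&ctx, 4);	   preallocate 4 caps
 *	cap = get_cap(&ctx);		   cannot fail once reserved
 *	...
 *	ceph_put_cap(cap);		   return cap to the unused pool
 *	ceph_unreserve_caps(&ctx);	   drop leftover reservations
 */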
134 
135 void __init ceph_caps_init(void)
136 {
137 	INIT_LIST_HEAD(&caps_list);
138 	spin_lock_init(&caps_list_lock);
139 }
140 
141 void ceph_caps_finalize(void)
142 {
143 	struct ceph_cap *cap;
144 
145 	spin_lock(&caps_list_lock);
146 	while (!list_empty(&caps_list)) {
147 		cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
148 		list_del(&cap->caps_item);
149 		kmem_cache_free(ceph_cap_cachep, cap);
150 	}
151 	caps_total_count = 0;
152 	caps_avail_count = 0;
153 	caps_use_count = 0;
154 	caps_reserve_count = 0;
155 	caps_min_count = 0;
156 	spin_unlock(&caps_list_lock);
157 }
158 
159 void ceph_adjust_min_caps(int delta)
160 {
161 	spin_lock(&caps_list_lock);
162 	caps_min_count += delta;
163 	BUG_ON(caps_min_count < 0);
164 	spin_unlock(&caps_list_lock);
165 }
166 
167 int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
168 {
169 	int i;
170 	struct ceph_cap *cap;
171 	int have;
172 	int alloc = 0;
173 	LIST_HEAD(newcaps);
174 	int ret = 0;
175 
176 	dout("reserve caps ctx=%p need=%d\n", ctx, need);
177 
178 	/* first reserve any caps that are already allocated */
179 	spin_lock(&caps_list_lock);
180 	if (caps_avail_count >= need)
181 		have = need;
182 	else
183 		have = caps_avail_count;
184 	caps_avail_count -= have;
185 	caps_reserve_count += have;
186 	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
187 	       caps_avail_count);
188 	spin_unlock(&caps_list_lock);
189 
190 	for (i = have; i < need; i++) {
191 		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
192 		if (!cap) {
193 			ret = -ENOMEM;
194 			goto out_alloc_count;
195 		}
196 		list_add(&cap->caps_item, &newcaps);
197 		alloc++;
198 	}
199 	BUG_ON(have + alloc != need);
200 
201 	spin_lock(&caps_list_lock);
202 	caps_total_count += alloc;
203 	caps_reserve_count += alloc;
204 	list_splice(&newcaps, &caps_list);
205 
206 	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
207 	       caps_avail_count);
208 	spin_unlock(&caps_list_lock);
209 
210 	ctx->count = need;
211 	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
212 	     ctx, caps_total_count, caps_use_count, caps_reserve_count,
213 	     caps_avail_count);
214 	return 0;
215 
216 out_alloc_count:
217 	/* we didn't manage to reserve as much as we needed */
218 	pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
219 		   ctx, need, have);
220 	return ret;
221 }
222 
223 int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
224 {
225 	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
226 	if (ctx->count) {
227 		spin_lock(&caps_list_lock);
228 		BUG_ON(caps_reserve_count < ctx->count);
229 		caps_reserve_count -= ctx->count;
230 		caps_avail_count += ctx->count;
231 		ctx->count = 0;
232 		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
233 		     caps_total_count, caps_use_count, caps_reserve_count,
234 		     caps_avail_count);
235 		BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
236 		       caps_avail_count);
237 		spin_unlock(&caps_list_lock);
238 	}
239 	return 0;
240 }
241 
242 static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
243 {
244 	struct ceph_cap *cap = NULL;
245 
246 	/* temporary, until we do something about cap import/export */
247 	if (!ctx)
248 		return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
249 
250 	spin_lock(&caps_list_lock);
251 	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
252 	     ctx, ctx->count, caps_total_count, caps_use_count,
253 	     caps_reserve_count, caps_avail_count);
254 	BUG_ON(!ctx->count);
255 	BUG_ON(ctx->count > caps_reserve_count);
256 	BUG_ON(list_empty(&caps_list));
257 
258 	ctx->count--;
259 	caps_reserve_count--;
260 	caps_use_count++;
261 
262 	cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
263 	list_del(&cap->caps_item);
264 
265 	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
266 	       caps_avail_count);
267 	spin_unlock(&caps_list_lock);
268 	return cap;
269 }
270 
271 void ceph_put_cap(struct ceph_cap *cap)
272 {
273 	spin_lock(&caps_list_lock);
274 	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
275 	     cap, caps_total_count, caps_use_count,
276 	     caps_reserve_count, caps_avail_count);
277 	caps_use_count--;
278 	/*
279 	 * Keep some preallocated caps around (caps_min_count), to
280 	 * avoid lots of free/alloc churn.
281 	 */
282 	if (caps_avail_count >= caps_reserve_count + caps_min_count) {
283 		caps_total_count--;
284 		kmem_cache_free(ceph_cap_cachep, cap);
285 	} else {
286 		caps_avail_count++;
287 		list_add(&cap->caps_item, &caps_list);
288 	}
289 
290 	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
291 	       caps_avail_count);
292 	spin_unlock(&caps_list_lock);
293 }
294 
295 void ceph_reservation_status(struct ceph_client *client,
296 			     int *total, int *avail, int *used, int *reserved,
297 			     int *min)
298 {
299 	if (total)
300 		*total = caps_total_count;
301 	if (avail)
302 		*avail = caps_avail_count;
303 	if (used)
304 		*used = caps_use_count;
305 	if (reserved)
306 		*reserved = caps_reserve_count;
307 	if (min)
308 		*min = caps_min_count;
309 }
310 
311 /*
312  * Find ceph_cap for given mds, if any.
313  *
314  * Called with i_lock held.
315  */
316 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
317 {
318 	struct ceph_cap *cap;
319 	struct rb_node *n = ci->i_caps.rb_node;
320 
321 	while (n) {
322 		cap = rb_entry(n, struct ceph_cap, ci_node);
323 		if (mds < cap->mds)
324 			n = n->rb_left;
325 		else if (mds > cap->mds)
326 			n = n->rb_right;
327 		else
328 			return cap;
329 	}
330 	return NULL;
331 }
332 
333 /*
334  * Return the id of any MDS with a cap, preferring one that issues the
335  * FILE_WR|FILE_BUFFER|FILE_EXCL caps; return -1 if no cap is held.
336  */
337 static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq)
338 {
339 	struct ceph_cap *cap;
340 	int mds = -1;
341 	struct rb_node *p;
342 
343 	/* prefer mds with FILE_WR|FILE_BUFFER|FILE_EXCL caps */
344 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
345 		cap = rb_entry(p, struct ceph_cap, ci_node);
346 		mds = cap->mds;
347 		if (mseq)
348 			*mseq = cap->mseq;
349 		if (cap->issued & (CEPH_CAP_FILE_WR |
350 				   CEPH_CAP_FILE_BUFFER |
351 				   CEPH_CAP_FILE_EXCL))
352 			break;
353 	}
354 	return mds;
355 }
356 
357 int ceph_get_cap_mds(struct inode *inode)
358 {
359 	int mds;
360 	spin_lock(&inode->i_lock);
361 	mds = __ceph_get_cap_mds(ceph_inode(inode), NULL);
362 	spin_unlock(&inode->i_lock);
363 	return mds;
364 }
365 
366 /*
367  * Called under i_lock.
368  */
369 static void __insert_cap_node(struct ceph_inode_info *ci,
370 			      struct ceph_cap *new)
371 {
372 	struct rb_node **p = &ci->i_caps.rb_node;
373 	struct rb_node *parent = NULL;
374 	struct ceph_cap *cap = NULL;
375 
376 	while (*p) {
377 		parent = *p;
378 		cap = rb_entry(parent, struct ceph_cap, ci_node);
379 		if (new->mds < cap->mds)
380 			p = &(*p)->rb_left;
381 		else if (new->mds > cap->mds)
382 			p = &(*p)->rb_right;
383 		else
384 			BUG();
385 	}
386 
387 	rb_link_node(&new->ci_node, parent, p);
388 	rb_insert_color(&new->ci_node, &ci->i_caps);
389 }
390 
391 /*
392  * (re)set cap hold timeouts, which control the delayed release
393  * of unused caps back to the MDS.  Should be called on cap use.
394  */
395 static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
396 			       struct ceph_inode_info *ci)
397 {
398 	struct ceph_mount_args *ma = mdsc->client->mount_args;
399 
400 	ci->i_hold_caps_min = round_jiffies(jiffies +
401 					    ma->caps_wanted_delay_min * HZ);
402 	ci->i_hold_caps_max = round_jiffies(jiffies +
403 					    ma->caps_wanted_delay_max * HZ);
404 	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
405 	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
406 }
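
/*
 * Worked example (mount option values assumed here, not taken from
 * this file): with caps_wanted_delay_min=5 and caps_wanted_delay_max=60
 * seconds, a cap used now should not be released for ~5s, and should
 * be released within ~60s; round_jiffies() batches nearby expirations
 * to reduce timer wakeups.
 */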
407 
408 /*
409  * (Re)queue the inode at the end of the delayed cap release list.
410  *
411  * If I_FLUSH is set, leave the inode at the front of the list.
412  *
413  * Caller holds i_lock
414  *    -> we take mdsc->cap_delay_lock
415  */
416 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
417 				struct ceph_inode_info *ci)
418 {
419 	__cap_set_timeouts(mdsc, ci);
420 	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
421 	     ci->i_ceph_flags, ci->i_hold_caps_max);
422 	if (!mdsc->stopping) {
423 		spin_lock(&mdsc->cap_delay_lock);
424 		if (!list_empty(&ci->i_cap_delay_list)) {
425 			if (ci->i_ceph_flags & CEPH_I_FLUSH)
426 				goto no_change;
427 			list_del_init(&ci->i_cap_delay_list);
428 		}
429 		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
430 no_change:
431 		spin_unlock(&mdsc->cap_delay_lock);
432 	}
433 }
434 
435 /*
436  * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
437  * indicating we should send a cap message to flush dirty metadata
438  * asap, and move to the front of the delayed cap list.
439  */
440 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
441 				      struct ceph_inode_info *ci)
442 {
443 	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
444 	spin_lock(&mdsc->cap_delay_lock);
445 	ci->i_ceph_flags |= CEPH_I_FLUSH;
446 	if (!list_empty(&ci->i_cap_delay_list))
447 		list_del_init(&ci->i_cap_delay_list);
448 	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
449 	spin_unlock(&mdsc->cap_delay_lock);
450 }
451 
452 /*
453  * Cancel delayed work on cap.
454  *
455  * Caller must hold i_lock.
456  */
457 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
458 			       struct ceph_inode_info *ci)
459 {
460 	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
461 	if (list_empty(&ci->i_cap_delay_list))
462 		return;
463 	spin_lock(&mdsc->cap_delay_lock);
464 	list_del_init(&ci->i_cap_delay_list);
465 	spin_unlock(&mdsc->cap_delay_lock);
466 }
467 
468 /*
469  * Common issue checks for add_cap, handle_cap_grant.
470  */
471 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
472 			      unsigned issued)
473 {
474 	unsigned had = __ceph_caps_issued(ci, NULL);
475 
476 	/*
477 	 * Each time we receive FILE_CACHE anew, we increment
478 	 * i_rdcache_gen.
479 	 */
480 	if ((issued & CEPH_CAP_FILE_CACHE) &&
481 	    (had & CEPH_CAP_FILE_CACHE) == 0)
482 		ci->i_rdcache_gen++;
483 
484 	/*
485 	 * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
486 	 * don't know what happened to this directory while we didn't
487 	 * have the cap.
488 	 */
489 	if ((issued & CEPH_CAP_FILE_SHARED) &&
490 	    (had & CEPH_CAP_FILE_SHARED) == 0) {
491 		ci->i_shared_gen++;
492 		if (S_ISDIR(ci->vfs_inode.i_mode)) {
493 			dout(" marking %p NOT complete\n", &ci->vfs_inode);
494 			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
495 		}
496 	}
497 }
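
/*
 * Sketch of intent (editorial): state cached under the old generation,
 * e.g. dentry leases keyed to i_shared_gen or page cache contents keyed
 * to i_rdcache_gen, stops being trusted once the corresponding cap is
 * reissued and the generation bumps.
 */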
498 
499 /*
500  * Add a capability under the given MDS session.
501  *
502  * Caller should hold session snap_rwsem (read) and s_mutex.
503  *
504  * @fmode is the open file mode, if we are opening a file, otherwise
505  * it is < 0.  (This is so we can atomically add the cap and add an
506  * open file reference to it.)
507  */
508 int ceph_add_cap(struct inode *inode,
509 		 struct ceph_mds_session *session, u64 cap_id,
510 		 int fmode, unsigned issued, unsigned wanted,
511 		 unsigned seq, unsigned mseq, u64 realmino, int flags,
512 		 struct ceph_cap_reservation *caps_reservation)
513 {
514 	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
515 	struct ceph_inode_info *ci = ceph_inode(inode);
516 	struct ceph_cap *new_cap = NULL;
517 	struct ceph_cap *cap;
518 	int mds = session->s_mds;
519 	int actual_wanted;
520 
521 	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
522 	     session->s_mds, cap_id, ceph_cap_string(issued), seq);
523 
524 	/*
525 	 * If we are opening the file, include file mode wanted bits
526 	 * in wanted.
527 	 */
528 	if (fmode >= 0)
529 		wanted |= ceph_caps_for_mode(fmode);
530 
531 retry:
532 	spin_lock(&inode->i_lock);
533 	cap = __get_cap_for_mds(ci, mds);
534 	if (!cap) {
535 		if (new_cap) {
536 			cap = new_cap;
537 			new_cap = NULL;
538 		} else {
539 			spin_unlock(&inode->i_lock);
540 			new_cap = get_cap(caps_reservation);
541 			if (new_cap == NULL)
542 				return -ENOMEM;
543 			goto retry;
544 		}
545 
546 		cap->issued = 0;
547 		cap->implemented = 0;
548 		cap->mds = mds;
549 		cap->mds_wanted = 0;
550 
551 		cap->ci = ci;
552 		__insert_cap_node(ci, cap);
553 
554 		/* clear out old exporting info?  (i.e. on cap import) */
555 		if (ci->i_cap_exporting_mds == mds) {
556 			ci->i_cap_exporting_issued = 0;
557 			ci->i_cap_exporting_mseq = 0;
558 			ci->i_cap_exporting_mds = -1;
559 		}
560 
561 		/* add to session cap list */
562 		cap->session = session;
563 		spin_lock(&session->s_cap_lock);
564 		list_add_tail(&cap->session_caps, &session->s_caps);
565 		session->s_nr_caps++;
566 		spin_unlock(&session->s_cap_lock);
567 	}
568 
569 	if (!ci->i_snap_realm) {
570 		/*
571 		 * add this inode to the appropriate snap realm
572 		 */
573 		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
574 							       realmino);
575 		if (realm) {
576 			ceph_get_snap_realm(mdsc, realm);
577 			spin_lock(&realm->inodes_with_caps_lock);
578 			ci->i_snap_realm = realm;
579 			list_add(&ci->i_snap_realm_item,
580 				 &realm->inodes_with_caps);
581 			spin_unlock(&realm->inodes_with_caps_lock);
582 		} else {
583 			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
584 			       realmino);
585 		}
586 	}
587 
588 	__check_cap_issue(ci, cap, issued);
589 
590 	/*
591 	 * If we are issued caps we don't want, or the mds' wanted
592 	 * value appears to be off, queue a check so we'll release
593 	 * later and/or update the mds wanted value.
594 	 */
595 	actual_wanted = __ceph_caps_wanted(ci);
596 	if ((wanted & ~actual_wanted) ||
597 	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
598 		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
599 		     ceph_cap_string(issued), ceph_cap_string(wanted),
600 		     ceph_cap_string(actual_wanted));
601 		__cap_delay_requeue(mdsc, ci);
602 	}
603 
604 	if (flags & CEPH_CAP_FLAG_AUTH)
605 		ci->i_auth_cap = cap;
606 	else if (ci->i_auth_cap == cap)
607 		ci->i_auth_cap = NULL;
608 
609 	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
610 	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
611 	     ceph_cap_string(issued|cap->issued), seq, mds);
612 	cap->cap_id = cap_id;
613 	cap->issued = issued;
614 	cap->implemented |= issued;
615 	cap->mds_wanted |= wanted;
616 	cap->seq = seq;
617 	cap->issue_seq = seq;
618 	cap->mseq = mseq;
619 	cap->cap_gen = session->s_cap_gen;
620 
621 	if (fmode >= 0)
622 		__ceph_get_fmode(ci, fmode);
623 	spin_unlock(&inode->i_lock);
624 	wake_up(&ci->i_cap_wq);
625 	return 0;
626 }
627 
628 /*
629  * Return true if cap has not timed out and belongs to the current
630  * generation of the MDS session (i.e. has not gone 'stale' due to
631  * us losing touch with the mds).
632  */
633 static int __cap_is_valid(struct ceph_cap *cap)
634 {
635 	unsigned long ttl;
636 	u32 gen;
637 
638 	spin_lock(&cap->session->s_cap_lock);
639 	gen = cap->session->s_cap_gen;
640 	ttl = cap->session->s_cap_ttl;
641 	spin_unlock(&cap->session->s_cap_lock);
642 
643 	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
644 		dout("__cap_is_valid %p cap %p issued %s "
645 		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
646 		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
647 		return 0;
648 	}
649 
650 	return 1;
651 }
652 
653 /*
654  * Return set of valid cap bits issued to us.  Note that caps time
655  * out, and may be invalidated in bulk if the client session times out
656  * and session->s_cap_gen is bumped.
657  */
658 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
659 {
660 	int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
661 	struct ceph_cap *cap;
662 	struct rb_node *p;
663 
664 	if (implemented)
665 		*implemented = 0;
666 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
667 		cap = rb_entry(p, struct ceph_cap, ci_node);
668 		if (!__cap_is_valid(cap))
669 			continue;
670 		dout("__ceph_caps_issued %p cap %p issued %s\n",
671 		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
672 		have |= cap->issued;
673 		if (implemented)
674 			*implemented |= cap->implemented;
675 	}
676 	return have;
677 }
678 
679 /*
680  * Get cap bits issued by caps other than @ocap
681  */
682 int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
683 {
684 	int have = ci->i_snap_caps;
685 	struct ceph_cap *cap;
686 	struct rb_node *p;
687 
688 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
689 		cap = rb_entry(p, struct ceph_cap, ci_node);
690 		if (cap == ocap)
691 			continue;
692 		if (!__cap_is_valid(cap))
693 			continue;
694 		have |= cap->issued;
695 	}
696 	return have;
697 }
698 
699 /*
700  * Move a cap to the end of the LRU (oldest caps at list head, newest
701  * at list tail).
702  */
703 static void __touch_cap(struct ceph_cap *cap)
704 {
705 	struct ceph_mds_session *s = cap->session;
706 
707 	spin_lock(&s->s_cap_lock);
708 	if (s->s_cap_iterator == NULL) {
709 		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
710 		     s->s_mds);
711 		list_move_tail(&cap->session_caps, &s->s_caps);
712 	} else {
713 		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
714 		     &cap->ci->vfs_inode, cap, s->s_mds);
715 	}
716 	spin_unlock(&s->s_cap_lock);
717 }
718 
719 /*
720  * Check if we hold the given mask.  If so, move the cap(s) to the
721  * tail (newest end) of their respective LRUs.  (This is the preferred
722  * way for callers to check for caps they want.)
723  */
724 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
725 {
726 	struct ceph_cap *cap;
727 	struct rb_node *p;
728 	int have = ci->i_snap_caps;
729 
730 	if ((have & mask) == mask) {
731 		dout("__ceph_caps_issued_mask %p snap issued %s"
732 		     " (mask %s)\n", &ci->vfs_inode,
733 		     ceph_cap_string(have),
734 		     ceph_cap_string(mask));
735 		return 1;
736 	}
737 
738 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
739 		cap = rb_entry(p, struct ceph_cap, ci_node);
740 		if (!__cap_is_valid(cap))
741 			continue;
742 		if ((cap->issued & mask) == mask) {
743 			dout("__ceph_caps_issued_mask %p cap %p issued %s"
744 			     " (mask %s)\n", &ci->vfs_inode, cap,
745 			     ceph_cap_string(cap->issued),
746 			     ceph_cap_string(mask));
747 			if (touch)
748 				__touch_cap(cap);
749 			return 1;
750 		}
751 
752 		/* does a combination of caps satisfy mask? */
753 		have |= cap->issued;
754 		if ((have & mask) == mask) {
755 			dout("__ceph_caps_issued_mask %p combo issued %s"
756 			     " (mask %s)\n", &ci->vfs_inode,
757 			     ceph_cap_string(cap->issued),
758 			     ceph_cap_string(mask));
759 			if (touch) {
760 				struct rb_node *q;
761 
762 				/* touch this + preceding caps */
763 				__touch_cap(cap);
764 				for (q = rb_first(&ci->i_caps); q != p;
765 				     q = rb_next(q)) {
766 					cap = rb_entry(q, struct ceph_cap,
767 						       ci_node);
768 					if (!__cap_is_valid(cap))
769 						continue;
770 					__touch_cap(cap);
771 				}
772 			}
773 			return 1;
774 		}
775 	}
776 
777 	return 0;
778 }
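
/*
 * Hypothetical scenario for the combination case above: if mds0 issues
 * Fs and mds1 issues Fr, neither cap alone satisfies a mask of
 * FILE_SHARED|FILE_RD, but the accumulated 'have' does, and both caps
 * get touched.
 */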
779 
780 /*
781  * Return true if mask caps are currently being revoked by an MDS.
782  */
783 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
784 {
785 	struct inode *inode = &ci->vfs_inode;
786 	struct ceph_cap *cap;
787 	struct rb_node *p;
788 	int ret = 0;
789 
790 	spin_lock(&inode->i_lock);
791 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
792 		cap = rb_entry(p, struct ceph_cap, ci_node);
793 		if (__cap_is_valid(cap) &&
794 		    (cap->implemented & ~cap->issued & mask)) {
795 			ret = 1;
796 			break;
797 		}
798 	}
799 	spin_unlock(&inode->i_lock);
800 	dout("ceph_caps_revoking %p %s = %d\n", inode,
801 	     ceph_cap_string(mask), ret);
802 	return ret;
803 }
804 
805 int __ceph_caps_used(struct ceph_inode_info *ci)
806 {
807 	int used = 0;
808 	if (ci->i_pin_ref)
809 		used |= CEPH_CAP_PIN;
810 	if (ci->i_rd_ref)
811 		used |= CEPH_CAP_FILE_RD;
812 	if (ci->i_rdcache_ref || ci->i_rdcache_gen)
813 		used |= CEPH_CAP_FILE_CACHE;
814 	if (ci->i_wr_ref)
815 		used |= CEPH_CAP_FILE_WR;
816 	if (ci->i_wrbuffer_ref)
817 		used |= CEPH_CAP_FILE_BUFFER;
818 	return used;
819 }
820 
821 /*
822  * wanted, by virtue of open file modes
823  */
824 int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
825 {
826 	int want = 0;
827 	int mode;
828 	for (mode = 0; mode < 4; mode++)
829 		if (ci->i_nr_by_mode[mode])
830 			want |= ceph_caps_for_mode(mode);
831 	return want;
832 }
833 
834 /*
835  * Return caps we have registered with the MDS(s) as 'wanted'.
836  */
837 int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
838 {
839 	struct ceph_cap *cap;
840 	struct rb_node *p;
841 	int mds_wanted = 0;
842 
843 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
844 		cap = rb_entry(p, struct ceph_cap, ci_node);
845 		if (!__cap_is_valid(cap))
846 			continue;
847 		mds_wanted |= cap->mds_wanted;
848 	}
849 	return mds_wanted;
850 }
851 
852 /*
853  * called under i_lock
854  */
855 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
856 {
857 	return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
858 }
859 
860 /*
861  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
862  *
863  * caller should hold i_lock.
864  * caller will not hold session s_mutex if called from destroy_inode.
865  */
866 void __ceph_remove_cap(struct ceph_cap *cap)
867 {
868 	struct ceph_mds_session *session = cap->session;
869 	struct ceph_inode_info *ci = cap->ci;
870 	struct ceph_mds_client *mdsc =
871 		&ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
872 	int removed = 0;
873 
874 	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
875 
876 	/* remove from session list */
877 	spin_lock(&session->s_cap_lock);
878 	if (session->s_cap_iterator == cap) {
879 		/* not yet, we are iterating over this very cap */
880 		dout("__ceph_remove_cap  delaying %p removal from session %p\n",
881 		     cap, cap->session);
882 	} else {
883 		list_del_init(&cap->session_caps);
884 		session->s_nr_caps--;
885 		cap->session = NULL;
886 		removed = 1;
887 	}
888 	/* protect backpointer with s_cap_lock: see iterate_session_caps */
889 	cap->ci = NULL;
890 	spin_unlock(&session->s_cap_lock);
891 
892 	/* remove from inode list */
893 	rb_erase(&cap->ci_node, &ci->i_caps);
894 	if (ci->i_auth_cap == cap)
895 		ci->i_auth_cap = NULL;
896 
897 	if (removed)
898 		ceph_put_cap(cap);
899 
900 	if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
901 		struct ceph_snap_realm *realm = ci->i_snap_realm;
902 		spin_lock(&realm->inodes_with_caps_lock);
903 		list_del_init(&ci->i_snap_realm_item);
904 		ci->i_snap_realm_counter++;
905 		ci->i_snap_realm = NULL;
906 		spin_unlock(&realm->inodes_with_caps_lock);
907 		ceph_put_snap_realm(mdsc, realm);
908 	}
909 	if (!__ceph_is_any_real_caps(ci))
910 		__cap_delay_cancel(mdsc, ci);
911 }
912 
913 /*
914  * Build and send a cap message to the given MDS.
915  *
916  * Caller should be holding s_mutex.
917  */
918 static int send_cap_msg(struct ceph_mds_session *session,
919 			u64 ino, u64 cid, int op,
920 			int caps, int wanted, int dirty,
921 			u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
922 			u64 size, u64 max_size,
923 			struct timespec *mtime, struct timespec *atime,
924 			u64 time_warp_seq,
925 			uid_t uid, gid_t gid, mode_t mode,
926 			u64 xattr_version,
927 			struct ceph_buffer *xattrs_buf,
928 			u64 follows)
929 {
930 	struct ceph_mds_caps *fc;
931 	struct ceph_msg *msg;
932 
933 	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
934 	     " seq %u/%u mseq %u follows %lld size %llu/%llu"
935 	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
936 	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
937 	     ceph_cap_string(dirty),
938 	     seq, issue_seq, mseq, follows, size, max_size,
939 	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
940 
941 	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS);
942 	if (!msg)
943 		return -ENOMEM;
944 
945 	msg->hdr.tid = cpu_to_le64(flush_tid);
946 
947 	fc = msg->front.iov_base;
948 	memset(fc, 0, sizeof(*fc));
949 
950 	fc->cap_id = cpu_to_le64(cid);
951 	fc->op = cpu_to_le32(op);
952 	fc->seq = cpu_to_le32(seq);
953 	fc->issue_seq = cpu_to_le32(issue_seq);
954 	fc->migrate_seq = cpu_to_le32(mseq);
955 	fc->caps = cpu_to_le32(caps);
956 	fc->wanted = cpu_to_le32(wanted);
957 	fc->dirty = cpu_to_le32(dirty);
958 	fc->ino = cpu_to_le64(ino);
959 	fc->snap_follows = cpu_to_le64(follows);
960 
961 	fc->size = cpu_to_le64(size);
962 	fc->max_size = cpu_to_le64(max_size);
963 	if (mtime)
964 		ceph_encode_timespec(&fc->mtime, mtime);
965 	if (atime)
966 		ceph_encode_timespec(&fc->atime, atime);
967 	fc->time_warp_seq = cpu_to_le32(time_warp_seq);
968 
969 	fc->uid = cpu_to_le32(uid);
970 	fc->gid = cpu_to_le32(gid);
971 	fc->mode = cpu_to_le32(mode);
972 
973 	fc->xattr_version = cpu_to_le64(xattr_version);
974 	if (xattrs_buf) {
975 		msg->middle = ceph_buffer_get(xattrs_buf);
976 		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
977 		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
978 	}
979 
980 	ceph_con_send(&session->s_con, msg);
981 	return 0;
982 }
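
/*
 * Note on the wire layout built above: the fixed-size ceph_mds_caps
 * struct travels in the message front, while any xattr blob is attached
 * as the message middle, with its length recorded in both fc->xattr_len
 * and the message header.
 */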
983 
984 /*
985  * Queue cap releases when an inode is dropped from our cache.  Since
986  * the inode is about to be destroyed, there is no need for i_lock.
987  */
988 void ceph_queue_caps_release(struct inode *inode)
989 {
990 	struct ceph_inode_info *ci = ceph_inode(inode);
991 	struct rb_node *p;
992 
993 	p = rb_first(&ci->i_caps);
994 	while (p) {
995 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
996 		struct ceph_mds_session *session = cap->session;
997 		struct ceph_msg *msg;
998 		struct ceph_mds_cap_release *head;
999 		struct ceph_mds_cap_item *item;
1000 
1001 		spin_lock(&session->s_cap_lock);
1002 		BUG_ON(!session->s_num_cap_releases);
1003 		msg = list_first_entry(&session->s_cap_releases,
1004 				       struct ceph_msg, list_head);
1005 
1006 		dout(" adding %p release to mds%d msg %p (%d left)\n",
1007 		     inode, session->s_mds, msg, session->s_num_cap_releases);
1008 
1009 		BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
1010 		head = msg->front.iov_base;
1011 		head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
1012 		item = msg->front.iov_base + msg->front.iov_len;
1013 		item->ino = cpu_to_le64(ceph_ino(inode));
1014 		item->cap_id = cpu_to_le64(cap->cap_id);
1015 		item->migrate_seq = cpu_to_le32(cap->mseq);
1016 		item->seq = cpu_to_le32(cap->issue_seq);
1017 
1018 		session->s_num_cap_releases--;
1019 
1020 		msg->front.iov_len += sizeof(*item);
1021 		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1022 			dout(" release msg %p full\n", msg);
1023 			list_move_tail(&msg->list_head,
1024 				       &session->s_cap_releases_done);
1025 		} else {
1026 			dout(" release msg %p at %d/%d (%d)\n", msg,
1027 			     (int)le32_to_cpu(head->num),
1028 			     (int)CEPH_CAPS_PER_RELEASE,
1029 			     (int)msg->front.iov_len);
1030 		}
1031 		spin_unlock(&session->s_cap_lock);
1032 		p = rb_next(p);
1033 		__ceph_remove_cap(cap);
1034 	}
1035 }
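
/*
 * This implements the batching described in the file header: each
 * message on s_cap_releases holds up to CEPH_CAPS_PER_RELEASE items,
 * and full messages move to s_cap_releases_done to be sent to the MDS
 * later.
 */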
1036 
1037 /*
1038  * Send a cap msg on the given inode.  Update our caps state, then
1039  * drop i_lock and send the message.
1040  *
1041  * Make note of max_size reported/requested from mds, revoked caps
1042  * that have now been implemented.
1043  *
1044  * Make a half-hearted attempt to invalidate the page cache if we are
1045  * dropping RDCACHE.  Note that this will leave behind locked pages
1046  * that we'll then need to deal with elsewhere.
1047  *
1048  * Return non-zero if we delayed the release, or if we hit an error
1049  * such that the caller should requeue and retry later.
1050  *
1051  * called with i_lock, then drops it.
1052  * caller should hold snap_rwsem (read), s_mutex.
1053  */
1054 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1055 		      int op, int used, int want, int retain, int flushing,
1056 		      unsigned *pflush_tid)
1057 	__releases(cap->ci->vfs_inode->i_lock)
1058 {
1059 	struct ceph_inode_info *ci = cap->ci;
1060 	struct inode *inode = &ci->vfs_inode;
1061 	u64 cap_id = cap->cap_id;
1062 	int held, revoking, dropping, keep;
1063 	u64 seq, issue_seq, mseq, time_warp_seq, follows;
1064 	u64 size, max_size;
1065 	struct timespec mtime, atime;
1066 	int wake = 0;
1067 	mode_t mode;
1068 	uid_t uid;
1069 	gid_t gid;
1070 	struct ceph_mds_session *session;
1071 	u64 xattr_version = 0;
1072 	int delayed = 0;
1073 	u64 flush_tid = 0;
1074 	int i;
1075 	int ret;
1076 
1077 	held = cap->issued | cap->implemented;
1078 	revoking = cap->implemented & ~cap->issued;
1079 	retain &= ~revoking;
1080 	dropping = cap->issued & ~retain;
1081 
1082 	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
1083 	     inode, cap, cap->session,
1084 	     ceph_cap_string(held), ceph_cap_string(held & retain),
1085 	     ceph_cap_string(revoking));
1086 	BUG_ON((retain & CEPH_CAP_PIN) == 0);
1087 
1088 	session = cap->session;
1089 
1090 	/* don't release wanted unless we've waited a bit. */
1091 	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1092 	    time_before(jiffies, ci->i_hold_caps_min)) {
1093 		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
1094 		     ceph_cap_string(cap->issued),
1095 		     ceph_cap_string(cap->issued & retain),
1096 		     ceph_cap_string(cap->mds_wanted),
1097 		     ceph_cap_string(want));
1098 		want |= cap->mds_wanted;
1099 		retain |= cap->issued;
1100 		delayed = 1;
1101 	}
1102 	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
1103 
1104 	cap->issued &= retain;  /* drop bits we don't want */
1105 	if (cap->implemented & ~cap->issued) {
1106 		/*
1107 		 * Wake up any waiters on wanted -> needed transition.
1108 		 * This is due to the weird transition from buffered
1109 		 * to sync IO... we need to flush dirty pages _before_
1110 		 * allowing sync writes to avoid reordering.
1111 		 */
1112 		wake = 1;
1113 	}
1114 	cap->implemented &= cap->issued | used;
1115 	cap->mds_wanted = want;
1116 
1117 	if (flushing) {
1118 		/*
1119 		 * assign a tid for flush operations so we can avoid
1120 		 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
1121 		 * clean type races.  track latest tid for every bit
1122 		 * so we can handle flush AxFw, flush Fw, and have the
1123 		 * first ack clean Ax.
1124 		 */
1125 		flush_tid = ++ci->i_cap_flush_last_tid;
1126 		if (pflush_tid)
1127 			*pflush_tid = flush_tid;
1128 		dout(" cap_flush_tid %d\n", (int)flush_tid);
1129 		for (i = 0; i < CEPH_CAP_BITS; i++)
1130 			if (flushing & (1 << i))
1131 				ci->i_cap_flush_tid[i] = flush_tid;
1132 	}
1133 
1134 	keep = cap->implemented;
1135 	seq = cap->seq;
1136 	issue_seq = cap->issue_seq;
1137 	mseq = cap->mseq;
1138 	size = inode->i_size;
1139 	ci->i_reported_size = size;
1140 	max_size = ci->i_wanted_max_size;
1141 	ci->i_requested_max_size = max_size;
1142 	mtime = inode->i_mtime;
1143 	atime = inode->i_atime;
1144 	time_warp_seq = ci->i_time_warp_seq;
1145 	follows = ci->i_snap_realm->cached_context->seq;
1146 	uid = inode->i_uid;
1147 	gid = inode->i_gid;
1148 	mode = inode->i_mode;
1149 
1150 	if (dropping & CEPH_CAP_XATTR_EXCL) {
1151 		__ceph_build_xattrs_blob(ci);
1152 		xattr_version = ci->i_xattrs.version + 1;
1153 	}
1154 
1155 	spin_unlock(&inode->i_lock);
1156 
1157 	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1158 		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
1159 		size, max_size, &mtime, &atime, time_warp_seq,
1160 		uid, gid, mode,
1161 		xattr_version,
1162 		(flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
1163 		follows);
1164 	if (ret < 0) {
1165 		dout("error sending cap msg, must requeue %p\n", inode);
1166 		delayed = 1;
1167 	}
1168 
1169 	if (wake)
1170 		wake_up(&ci->i_cap_wq);
1171 
1172 	return delayed;
1173 }
1174 
1175 /*
1176  * When a snapshot is taken, clients accumulate dirty metadata on
1177  * inodes with capabilities in ceph_cap_snaps to describe the file
1178  * state at the time the snapshot was taken.  This must be flushed
1179  * asynchronously back to the MDS once sync writes complete and dirty
1180  * data is written out.
1181  *
1182  * Called under i_lock.  Takes s_mutex as needed.
1183  */
1184 void __ceph_flush_snaps(struct ceph_inode_info *ci,
1185 			struct ceph_mds_session **psession)
1186 {
1187 	struct inode *inode = &ci->vfs_inode;
1188 	int mds;
1189 	struct ceph_cap_snap *capsnap;
1190 	u32 mseq;
1191 	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
1192 	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1193 						    session->s_mutex */
1194 	u64 next_follows = 0;  /* keep track of how far we've gotten through the
1195 			     i_cap_snaps list, and skip these entries next time
1196 			     around to avoid an infinite loop */
1197 
1198 	if (psession)
1199 		session = *psession;
1200 
1201 	dout("__flush_snaps %p\n", inode);
1202 retry:
1203 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
1204 		/* avoid an infinite loop after retry */
1205 		if (capsnap->follows < next_follows)
1206 			continue;
1207 		/*
1208 		 * we need to wait for sync writes to complete and for dirty
1209 		 * pages to be written out.
1210 		 */
1211 		if (capsnap->dirty_pages || capsnap->writing)
1212 			continue;
1213 
1214 		/*
1215 		 * if cap writeback already occurred, we should have dropped
1216 		 * the capsnap in ceph_put_wrbuffer_cap_refs.
1217 		 */
1218 		BUG_ON(capsnap->dirty == 0);
1219 
1220 		/* pick mds, take s_mutex */
1221 		mds = __ceph_get_cap_mds(ci, &mseq);
1222 		if (session && session->s_mds != mds) {
1223 			dout("oops, wrong session %p mutex\n", session);
1224 			mutex_unlock(&session->s_mutex);
1225 			ceph_put_mds_session(session);
1226 			session = NULL;
1227 		}
1228 		if (!session) {
1229 			spin_unlock(&inode->i_lock);
1230 			mutex_lock(&mdsc->mutex);
1231 			session = __ceph_lookup_mds_session(mdsc, mds);
1232 			mutex_unlock(&mdsc->mutex);
1233 			if (session) {
1234 				dout("inverting session/ino locks on %p\n",
1235 				     session);
1236 				mutex_lock(&session->s_mutex);
1237 			}
1238 			/*
1239 			 * if session == NULL, we raced against a cap
1240 			 * deletion.  retry, and we'll get a better
1241 			 * @mds value next time.
1242 			 */
1243 			spin_lock(&inode->i_lock);
1244 			goto retry;
1245 		}
1246 
1247 		capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
1248 		atomic_inc(&capsnap->nref);
1249 		if (!list_empty(&capsnap->flushing_item))
1250 			list_del_init(&capsnap->flushing_item);
1251 		list_add_tail(&capsnap->flushing_item,
1252 			      &session->s_cap_snaps_flushing);
1253 		spin_unlock(&inode->i_lock);
1254 
1255 		dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
1256 		     inode, capsnap, next_follows, capsnap->size);
1257 		send_cap_msg(session, ceph_vino(inode).ino, 0,
1258 			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1259 			     capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
1260 			     capsnap->size, 0,
1261 			     &capsnap->mtime, &capsnap->atime,
1262 			     capsnap->time_warp_seq,
1263 			     capsnap->uid, capsnap->gid, capsnap->mode,
1264 			     0, NULL,
1265 			     capsnap->follows);
1266 
1267 		next_follows = capsnap->follows + 1;
1268 		ceph_put_cap_snap(capsnap);
1269 
1270 		spin_lock(&inode->i_lock);
1271 		goto retry;
1272 	}
1273 
1274 	/* we flushed them all; remove this inode from the queue */
1275 	spin_lock(&mdsc->snap_flush_lock);
1276 	list_del_init(&ci->i_snap_flush_item);
1277 	spin_unlock(&mdsc->snap_flush_lock);
1278 
1279 	if (psession)
1280 		*psession = session;
1281 	else if (session) {
1282 		mutex_unlock(&session->s_mutex);
1283 		ceph_put_mds_session(session);
1284 	}
1285 }
1286 
1287 static void ceph_flush_snaps(struct ceph_inode_info *ci)
1288 {
1289 	struct inode *inode = &ci->vfs_inode;
1290 
1291 	spin_lock(&inode->i_lock);
1292 	__ceph_flush_snaps(ci, NULL);
1293 	spin_unlock(&inode->i_lock);
1294 }
1295 
1296 /*
1297  * Mark caps dirty.  If inode is newly dirty, add to the global dirty
1298  * list.
1299  */
1300 void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1301 {
1302 	struct ceph_mds_client *mdsc =
1303 		&ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1304 	struct inode *inode = &ci->vfs_inode;
1305 	int was = ci->i_dirty_caps;
1306 	int dirty = 0;
1307 
1308 	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
1309 	     ceph_cap_string(mask), ceph_cap_string(was),
1310 	     ceph_cap_string(was | mask));
1311 	ci->i_dirty_caps |= mask;
1312 	if (was == 0) {
1313 		dout(" inode %p now dirty\n", &ci->vfs_inode);
1314 		BUG_ON(!list_empty(&ci->i_dirty_item));
1315 		spin_lock(&mdsc->cap_dirty_lock);
1316 		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1317 		spin_unlock(&mdsc->cap_dirty_lock);
1318 		if (ci->i_flushing_caps == 0) {
1319 			igrab(inode);
1320 			dirty |= I_DIRTY_SYNC;
1321 		}
1322 	}
1323 	BUG_ON(list_empty(&ci->i_dirty_item));
1324 	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1325 	    (mask & CEPH_CAP_FILE_BUFFER))
1326 		dirty |= I_DIRTY_DATASYNC;
1327 	if (dirty)
1328 		__mark_inode_dirty(inode, dirty);
1329 	__cap_delay_requeue(mdsc, ci);
1330 }
1331 
1332 /*
1333  * Add dirty inode to the flushing list.  Assigned a seq number so we
1334  * can wait for caps to flush without starving.
1335  *
1336  * Called under i_lock.
1337  */
1338 static int __mark_caps_flushing(struct inode *inode,
1339 				 struct ceph_mds_session *session)
1340 {
1341 	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
1342 	struct ceph_inode_info *ci = ceph_inode(inode);
1343 	int flushing;
1344 
1345 	BUG_ON(ci->i_dirty_caps == 0);
1346 	BUG_ON(list_empty(&ci->i_dirty_item));
1347 
1348 	flushing = ci->i_dirty_caps;
1349 	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1350 	     ceph_cap_string(flushing),
1351 	     ceph_cap_string(ci->i_flushing_caps),
1352 	     ceph_cap_string(ci->i_flushing_caps | flushing));
1353 	ci->i_flushing_caps |= flushing;
1354 	ci->i_dirty_caps = 0;
1355 	dout(" inode %p now !dirty\n", inode);
1356 
1357 	spin_lock(&mdsc->cap_dirty_lock);
1358 	list_del_init(&ci->i_dirty_item);
1359 
1360 	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
1361 	if (list_empty(&ci->i_flushing_item)) {
1362 		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1363 		mdsc->num_cap_flushing++;
1364 		dout(" inode %p now flushing seq %lld\n", inode,
1365 		     ci->i_cap_flush_seq);
1366 	} else {
1367 		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1368 		dout(" inode %p now flushing (more) seq %lld\n", inode,
1369 		     ci->i_cap_flush_seq);
1370 	}
1371 	spin_unlock(&mdsc->cap_dirty_lock);
1372 
1373 	return flushing;
1374 }
1375 
1376 /*
1377  * try to invalidate mapping pages without blocking.
1378  */
1379 static int mapping_is_empty(struct address_space *mapping)
1380 {
1381 	struct page *page = find_get_page(mapping, 0);
1382 
1383 	if (!page)
1384 		return 1;
1385 
1386 	put_page(page);
1387 	return 0;
1388 }
1389 
1390 static int try_nonblocking_invalidate(struct inode *inode)
1391 {
1392 	struct ceph_inode_info *ci = ceph_inode(inode);
1393 	u32 invalidating_gen = ci->i_rdcache_gen;
1394 
1395 	spin_unlock(&inode->i_lock);
1396 	invalidate_mapping_pages(&inode->i_data, 0, -1);
1397 	spin_lock(&inode->i_lock);
1398 
1399 	if (mapping_is_empty(&inode->i_data) &&
1400 	    invalidating_gen == ci->i_rdcache_gen) {
1401 		/* success. */
1402 		dout("try_nonblocking_invalidate %p success\n", inode);
1403 		ci->i_rdcache_gen = 0;
1404 		ci->i_rdcache_revoking = 0;
1405 		return 0;
1406 	}
1407 	dout("try_nonblocking_invalidate %p failed\n", inode);
1408 	return -1;
1409 }
1410 
1411 /*
1412  * Swiss army knife function to examine currently used and wanted
1413  * versus held caps.  Release, flush, ack revoked caps to mds as
1414  * appropriate.
1415  *
1416  *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1417  *    cap release further.
1418  *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
1419  *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1420  *    further delay.
1421  */
1422 void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1423 		     struct ceph_mds_session *session)
1424 	__releases(session->s_mutex)
1425 {
1426 	struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
1427 	struct ceph_mds_client *mdsc = &client->mdsc;
1428 	struct inode *inode = &ci->vfs_inode;
1429 	struct ceph_cap *cap;
1430 	int file_wanted, used;
1431 	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
1432 	int issued, implemented, want, retain, revoking, flushing = 0;
1433 	int mds = -1;   /* keep track of how far we've gone through i_caps list
1434 			   to avoid an infinite loop on retry */
1435 	struct rb_node *p;
1436 	int tried_invalidate = 0;
1437 	int delayed = 0, sent = 0, force_requeue = 0, num;
1438 	int queue_invalidate = 0;
1439 	int is_delayed = flags & CHECK_CAPS_NODELAY;
1440 
1441 	/* if we are unmounting, flush any unused caps immediately. */
1442 	if (mdsc->stopping)
1443 		is_delayed = 1;
1444 
1445 	spin_lock(&inode->i_lock);
1446 
1447 	if (ci->i_ceph_flags & CEPH_I_FLUSH)
1448 		flags |= CHECK_CAPS_FLUSH;
1449 
1450 	/* flush snaps first time around only */
1451 	if (!list_empty(&ci->i_cap_snaps))
1452 		__ceph_flush_snaps(ci, &session);
1453 	goto retry_locked;
1454 retry:
1455 	spin_lock(&inode->i_lock);
1456 retry_locked:
1457 	file_wanted = __ceph_caps_file_wanted(ci);
1458 	used = __ceph_caps_used(ci);
1459 	want = file_wanted | used;
1460 	issued = __ceph_caps_issued(ci, &implemented);
1461 	revoking = implemented & ~issued;
1462 
1463 	retain = want | CEPH_CAP_PIN;
1464 	if (!mdsc->stopping && inode->i_nlink > 0) {
1465 		if (want) {
1466 			retain |= CEPH_CAP_ANY;       /* be greedy */
1467 		} else {
1468 			retain |= CEPH_CAP_ANY_SHARED;
1469 			/*
1470 			 * keep RD only if we didn't have the file open RW,
1471 			 * because then the mds would revoke it anyway to
1472 			 * journal max_size=0.
1473 			 */
1474 			if (ci->i_max_size == 0)
1475 				retain |= CEPH_CAP_ANY_RD;
1476 		}
1477 	}
1478 
1479 	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1480 	     " issued %s revoking %s retain %s %s%s%s\n", inode,
1481 	     ceph_cap_string(file_wanted),
1482 	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1483 	     ceph_cap_string(ci->i_flushing_caps),
1484 	     ceph_cap_string(issued), ceph_cap_string(revoking),
1485 	     ceph_cap_string(retain),
1486 	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1487 	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1488 	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1489 
1490 	/*
1491 	 * If we no longer need to hold onto our old caps, and we may
1492 	 * have cached pages but don't want them, then try to invalidate.
1493 	 * If we fail, it's because pages are locked... try again later.
1494 	 */
1495 	if ((!is_delayed || mdsc->stopping) &&
1496 	    ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
1497 	    ci->i_rdcache_gen &&                     /* may have cached pages */
1498 	    (file_wanted == 0 ||                     /* no open files */
1499 	     (revoking & CEPH_CAP_FILE_CACHE)) &&     /*  or revoking cache */
1500 	    !tried_invalidate) {
1501 		dout("check_caps trying to invalidate on %p\n", inode);
1502 		if (try_nonblocking_invalidate(inode) < 0) {
1503 			if (revoking & CEPH_CAP_FILE_CACHE) {
1504 				dout("check_caps queuing invalidate\n");
1505 				queue_invalidate = 1;
1506 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
1507 			} else {
1508 				dout("check_caps failed to invalidate pages\n");
1509 				/* we failed to invalidate pages.  check these
1510 				   caps again later. */
1511 				force_requeue = 1;
1512 				__cap_set_timeouts(mdsc, ci);
1513 			}
1514 		}
1515 		tried_invalidate = 1;
1516 		goto retry_locked;
1517 	}
1518 
1519 	num = 0;
1520 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1521 		cap = rb_entry(p, struct ceph_cap, ci_node);
1522 		num++;
1523 
1524 		/* avoid looping forever */
1525 		if (mds >= cap->mds ||
1526 		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1527 			continue;
1528 
1529 		/* NOTE: no side-effects allowed, until we take s_mutex */
1530 
1531 		revoking = cap->implemented & ~cap->issued;
1532 		if (revoking)
1533 			dout(" mds%d revoking %s\n", cap->mds,
1534 			     ceph_cap_string(revoking));
1535 
1536 		if (cap == ci->i_auth_cap &&
1537 		    (cap->issued & CEPH_CAP_FILE_WR)) {
1538 			/* request larger max_size from MDS? */
1539 			if (ci->i_wanted_max_size > ci->i_max_size &&
1540 			    ci->i_wanted_max_size > ci->i_requested_max_size) {
1541 				dout("requesting new max_size\n");
1542 				goto ack;
1543 			}
1544 
1545 			/* approaching file_max? */
1546 			if ((inode->i_size << 1) >= ci->i_max_size &&
1547 			    (ci->i_reported_size << 1) < ci->i_max_size) {
1548 				dout("i_size approaching max_size\n");
1549 				goto ack;
1550 			}
1551 		}
1552 		/* flush anything dirty? */
1553 		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1554 		    ci->i_dirty_caps) {
1555 			dout("flushing dirty caps\n");
1556 			goto ack;
1557 		}
1558 
1559 		/* completed revocation? (revoked caps are no longer in use) */
1560 		if (revoking && (revoking & used) == 0) {
1561 			dout("completed revocation of %s\n",
1562 			     ceph_cap_string(cap->implemented & ~cap->issued));
1563 			goto ack;
1564 		}
1565 
1566 		/* want more caps from mds? */
1567 		if (want & ~(cap->mds_wanted | cap->issued))
1568 			goto ack;
1569 
1570 		/* things we might delay */
1571 		if ((cap->issued & ~retain) == 0 &&
1572 		    cap->mds_wanted == want)
1573 			continue;     /* nope, all good */
1574 
1575 		if (is_delayed)
1576 			goto ack;
1577 
1578 		/* delay? */
1579 		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1580 		    time_before(jiffies, ci->i_hold_caps_max)) {
1581 			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1582 			     ceph_cap_string(cap->issued),
1583 			     ceph_cap_string(cap->issued & retain),
1584 			     ceph_cap_string(cap->mds_wanted),
1585 			     ceph_cap_string(want));
1586 			delayed++;
1587 			continue;
1588 		}
1589 
1590 ack:
1591 		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1592 			dout(" skipping %p I_NOFLUSH set\n", inode);
1593 			continue;
1594 		}
1595 
1596 		if (session && session != cap->session) {
1597 			dout("oops, wrong session %p mutex\n", session);
1598 			mutex_unlock(&session->s_mutex);
1599 			session = NULL;
1600 		}
1601 		if (!session) {
1602 			session = cap->session;
1603 			if (mutex_trylock(&session->s_mutex) == 0) {
1604 				dout("inverting session/ino locks on %p\n",
1605 				     session);
1606 				spin_unlock(&inode->i_lock);
1607 				if (took_snap_rwsem) {
1608 					up_read(&mdsc->snap_rwsem);
1609 					took_snap_rwsem = 0;
1610 				}
1611 				mutex_lock(&session->s_mutex);
1612 				goto retry;
1613 			}
1614 		}
1615 		/* take snap_rwsem after session mutex */
1616 		if (!took_snap_rwsem) {
1617 			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1618 				dout("inverting snap/in locks on %p\n",
1619 				     inode);
1620 				spin_unlock(&inode->i_lock);
1621 				down_read(&mdsc->snap_rwsem);
1622 				took_snap_rwsem = 1;
1623 				goto retry;
1624 			}
1625 			took_snap_rwsem = 1;
1626 		}
1627 
1628 		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
1629 			flushing = __mark_caps_flushing(inode, session);
1630 
1631 		mds = cap->mds;  /* remember mds, so we don't repeat */
1632 		sent++;
1633 
1634 		/* __send_cap drops i_lock */
1635 		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
1636 				      retain, flushing, NULL);
1637 		goto retry; /* retake i_lock and restart our cap scan. */
1638 	}
1639 
1640 	/*
1641 	 * Reschedule delayed caps release if we delayed anything,
1642 	 * otherwise cancel.
1643 	 */
1644 	if (delayed && is_delayed)
1645 		force_requeue = 1;   /* __send_cap delayed release; requeue */
1646 	if (!delayed && !is_delayed)
1647 		__cap_delay_cancel(mdsc, ci);
1648 	else if (!is_delayed || force_requeue)
1649 		__cap_delay_requeue(mdsc, ci);
1650 
1651 	spin_unlock(&inode->i_lock);
1652 
1653 	if (queue_invalidate)
1654 		ceph_queue_invalidate(inode);
1655 
1656 	if (session)
1657 		mutex_unlock(&session->s_mutex);
1658 	if (took_snap_rwsem)
1659 		up_read(&mdsc->snap_rwsem);
1660 }
1661 
1662 /*
1663  * Try to flush dirty caps back to the auth mds.
1664  */
1665 static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1666 			  unsigned *flush_tid)
1667 {
1668 	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
1669 	struct ceph_inode_info *ci = ceph_inode(inode);
1670 	int unlock_session = session ? 0 : 1;
1671 	int flushing = 0;
1672 
1673 retry:
1674 	spin_lock(&inode->i_lock);
1675 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1676 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1677 		goto out;
1678 	}
1679 	if (ci->i_dirty_caps && ci->i_auth_cap) {
1680 		struct ceph_cap *cap = ci->i_auth_cap;
1681 		int used = __ceph_caps_used(ci);
1682 		int want = __ceph_caps_wanted(ci);
1683 		int delayed;
1684 
1685 		if (!session) {
1686 			spin_unlock(&inode->i_lock);
1687 			session = cap->session;
1688 			mutex_lock(&session->s_mutex);
1689 			goto retry;
1690 		}
1691 		BUG_ON(session != cap->session);
1692 		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1693 			goto out;
1694 
1695 		flushing = __mark_caps_flushing(inode, session);
1696 
1697 		/* __send_cap drops i_lock */
1698 		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1699 				     cap->issued | cap->implemented, flushing,
1700 				     flush_tid);
1701 		if (!delayed)
1702 			goto out_unlocked;
1703 
1704 		spin_lock(&inode->i_lock);
1705 		__cap_delay_requeue(mdsc, ci);
1706 	}
1707 out:
1708 	spin_unlock(&inode->i_lock);
1709 out_unlocked:
1710 	if (session && unlock_session)
1711 		mutex_unlock(&session->s_mutex);
1712 	return flushing;
1713 }
1714 
1715 /*
1716  * Return true if we've flushed caps through the given flush_tid.
1717  */
1718 static int caps_are_flushed(struct inode *inode, unsigned tid)
1719 {
1720 	struct ceph_inode_info *ci = ceph_inode(inode);
1721 	int i, ret = 1;
1722 
1723 	spin_lock(&inode->i_lock);
1724 	for (i = 0; i < CEPH_CAP_BITS; i++)
1725 		if ((ci->i_flushing_caps & (1 << i)) &&
1726 		    ci->i_cap_flush_tid[i] <= tid) {
1727 			/* still flushing this bit */
1728 			ret = 0;
1729 			break;
1730 		}
1731 	spin_unlock(&inode->i_lock);
1732 	return ret;
1733 }
1734 
1735 /*
1736  * Wait on any unsafe replies for the given inode.  First wait on the
1737  * newest request, and make that the upper bound.  Then, if there are
1738  * more requests, keep waiting on the oldest as long as it is still older
1739  * than the original request.
1740  */
1741 static void sync_write_wait(struct inode *inode)
1742 {
1743 	struct ceph_inode_info *ci = ceph_inode(inode);
1744 	struct list_head *head = &ci->i_unsafe_writes;
1745 	struct ceph_osd_request *req;
1746 	u64 last_tid;
1747 
1748 	spin_lock(&ci->i_unsafe_lock);
1749 	if (list_empty(head))
1750 		goto out;
1751 
1752 	/* set upper bound as _last_ entry in chain */
1753 	req = list_entry(head->prev, struct ceph_osd_request,
1754 			 r_unsafe_item);
1755 	last_tid = req->r_tid;
1756 
1757 	do {
1758 		ceph_osdc_get_request(req);
1759 		spin_unlock(&ci->i_unsafe_lock);
1760 		dout("sync_write_wait on tid %llu (until %llu)\n",
1761 		     req->r_tid, last_tid);
1762 		wait_for_completion(&req->r_safe_completion);
1763 		spin_lock(&ci->i_unsafe_lock);
1764 		ceph_osdc_put_request(req);
1765 
1766 		/*
1767 		 * from here on look at first entry in chain, since we
1768 		 * only want to wait for anything older than last_tid
1769 		 */
1770 		if (list_empty(head))
1771 			break;
1772 		req = list_entry(head->next, struct ceph_osd_request,
1773 				 r_unsafe_item);
1774 	} while (req->r_tid < last_tid);
1775 out:
1776 	spin_unlock(&ci->i_unsafe_lock);
1777 }
1778 
1779 int ceph_fsync(struct file *file, struct dentry *dentry, int datasync)
1780 {
1781 	struct inode *inode = dentry->d_inode;
1782 	struct ceph_inode_info *ci = ceph_inode(inode);
1783 	unsigned flush_tid;
1784 	int ret;
1785 	int dirty;
1786 
1787 	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1788 	sync_write_wait(inode);
1789 
1790 	ret = filemap_write_and_wait(inode->i_mapping);
1791 	if (ret < 0)
1792 		return ret;
1793 
1794 	dirty = try_flush_caps(inode, NULL, &flush_tid);
1795 	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1796 
1797 	/*
1798 	 * only wait on non-file metadata writeback (the mds
1799 	 * can recover size and mtime, so we don't need to
1800 	 * wait for that)
1801 	 */
1802 	if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1803 		dout("fsync waiting for flush_tid %u\n", flush_tid);
1804 		ret = wait_event_interruptible(ci->i_cap_wq,
1805 				       caps_are_flushed(inode, flush_tid));
1806 	}
1807 
1808 	dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
1809 	return ret;
1810 }
1811 
1812 /*
1813  * Flush any dirty caps back to the mds.  If we aren't asked to wait,
1814  * queue inode for flush but don't do so immediately, because we can
1815  * get by with fewer MDS messages if we wait for data writeback to
1816  * complete first.
1817  */
1818 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1819 {
1820 	struct ceph_inode_info *ci = ceph_inode(inode);
1821 	unsigned flush_tid;
1822 	int err = 0;
1823 	int dirty;
1824 	int wait = wbc->sync_mode == WB_SYNC_ALL;
1825 
1826 	dout("write_inode %p wait=%d\n", inode, wait);
1827 	if (wait) {
1828 		dirty = try_flush_caps(inode, NULL, &flush_tid);
1829 		if (dirty)
1830 			err = wait_event_interruptible(ci->i_cap_wq,
1831 				       caps_are_flushed(inode, flush_tid));
1832 	} else {
1833 		struct ceph_mds_client *mdsc =
1834 			&ceph_sb_to_client(inode->i_sb)->mdsc;
1835 
1836 		spin_lock(&inode->i_lock);
1837 		if (__ceph_caps_dirty(ci))
1838 			__cap_delay_requeue_front(mdsc, ci);
1839 		spin_unlock(&inode->i_lock);
1840 	}
1841 	return err;
1842 }
1843 
1844 /*
1845  * After a recovering MDS goes active, we need to resend any caps
1846  * we were flushing.
1847  *
1848  * Caller holds session->s_mutex.
1849  */
1850 static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1851 				   struct ceph_mds_session *session)
1852 {
1853 	struct ceph_cap_snap *capsnap;
1854 
1855 	dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1856 	list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1857 			    flushing_item) {
1858 		struct ceph_inode_info *ci = capsnap->ci;
1859 		struct inode *inode = &ci->vfs_inode;
1860 		struct ceph_cap *cap;
1861 
1862 		spin_lock(&inode->i_lock);
1863 		cap = ci->i_auth_cap;
1864 		if (cap && cap->session == session) {
1865 			dout("kick_flushing_capsnaps %p cap %p capsnap %p\n",
1866 			     inode, cap, capsnap);
1867 			__ceph_flush_snaps(ci, &session);
1868 		} else {
1869 			pr_err("%p auth cap %p not mds%d ???\n", inode,
1870 			       cap, session->s_mds);
1871 		}
1872 		spin_unlock(&inode->i_lock);
1873 	}
1874 }
1875 
1876 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1877 			     struct ceph_mds_session *session)
1878 {
1879 	struct ceph_inode_info *ci;
1880 
1881 	kick_flushing_capsnaps(mdsc, session);
1882 
1883 	dout("kick_flushing_caps mds%d\n", session->s_mds);
1884 	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
1885 		struct inode *inode = &ci->vfs_inode;
1886 		struct ceph_cap *cap;
1887 		int delayed = 0;
1888 
1889 		spin_lock(&inode->i_lock);
1890 		cap = ci->i_auth_cap;
1891 		if (cap && cap->session == session) {
1892 			dout("kick_flushing_caps %p cap %p %s\n", inode,
1893 			     cap, ceph_cap_string(ci->i_flushing_caps));
1894 			delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1895 					     __ceph_caps_used(ci),
1896 					     __ceph_caps_wanted(ci),
1897 					     cap->issued | cap->implemented,
1898 					     ci->i_flushing_caps, NULL);
1899 			if (delayed) {
1900 				spin_lock(&inode->i_lock);
1901 				__cap_delay_requeue(mdsc, ci);
1902 				spin_unlock(&inode->i_lock);
1903 			}
1904 		} else {
1905 			pr_err("%p auth cap %p not mds%d ???\n", inode,
1906 			       cap, session->s_mds);
1907 			spin_unlock(&inode->i_lock);
1908 		}
1909 	}
1910 }
1911 
1912 
1913 /*
1914  * Take references to capabilities we hold, so that we don't release
1915  * them to the MDS prematurely.
1916  *
1917  * Protected by i_lock.
1918  */
1919 static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1920 {
1921 	if (got & CEPH_CAP_PIN)
1922 		ci->i_pin_ref++;
1923 	if (got & CEPH_CAP_FILE_RD)
1924 		ci->i_rd_ref++;
1925 	if (got & CEPH_CAP_FILE_CACHE)
1926 		ci->i_rdcache_ref++;
1927 	if (got & CEPH_CAP_FILE_WR)
1928 		ci->i_wr_ref++;
1929 	if (got & CEPH_CAP_FILE_BUFFER) {
1930 		if (ci->i_wrbuffer_ref == 0)
1931 			igrab(&ci->vfs_inode);
1932 		ci->i_wrbuffer_ref++;
1933 		dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
1934 		     &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
1935 	}
1936 }
1937 
1938 /*
1939  * Try to grab cap references.  Specify those refs we @want, and the
1940  * minimal set we @need.  Also include the larger offset we are writing
1941  * to (when applicable), and check against max_size here as well.
1942  * Note that caller is responsible for ensuring max_size increases are
1943  * requested from the MDS.
1944  */
1945 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
1946 			    int *got, loff_t endoff, int *check_max, int *err)
1947 {
1948 	struct inode *inode = &ci->vfs_inode;
1949 	int ret = 0;
1950 	int have, implemented;
1951 	int file_wanted;
1952 
1953 	dout("get_cap_refs %p need %s want %s\n", inode,
1954 	     ceph_cap_string(need), ceph_cap_string(want));
1955 	spin_lock(&inode->i_lock);
1956 
1957 	/* make sure file is actually open */
1958 	file_wanted = __ceph_caps_file_wanted(ci);
1959 	if ((file_wanted & need) == 0) {
1960 		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
1961 		     ceph_cap_string(need), ceph_cap_string(file_wanted));
1962 		*err = -EBADF;
1963 		ret = 1;
1964 		goto out;
1965 	}
1966 
1967 	if (need & CEPH_CAP_FILE_WR) {
1968 		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
1969 			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
1970 			     inode, endoff, ci->i_max_size);
1971 			if (endoff > ci->i_wanted_max_size) {
1972 				*check_max = 1;
1973 				ret = 1;
1974 			}
1975 			goto out;
1976 		}
1977 		/*
1978 		 * If a sync write is in progress, we must wait, so that we
1979 		 * can get a final snapshot value for size+mtime.
1980 		 */
1981 		if (__ceph_have_pending_cap_snap(ci)) {
1982 			dout("get_cap_refs %p cap_snap_pending\n", inode);
1983 			goto out;
1984 		}
1985 	}
1986 	have = __ceph_caps_issued(ci, &implemented);
1987 
1988 	/*
1989 	 * disallow writes while a truncate is pending
1990 	 */
1991 	if (ci->i_truncate_pending)
1992 		have &= ~CEPH_CAP_FILE_WR;
1993 
1994 	if ((have & need) == need) {
1995 		/*
1996 		 * Look at (implemented & ~have & not) so that we keep waiting
1997 		 * on transitions from wanted -> needed caps.  This is needed
1998 		 * for the WRBUFFER|WR -> WR case, so that a new WR sync write
1999 		 * does not start before a prior buffered writeback completes.
2000 		 */
2001 		int not = want & ~(have & need);
2002 		int revoking = implemented & ~have;
2003 		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2004 		     inode, ceph_cap_string(have), ceph_cap_string(not),
2005 		     ceph_cap_string(revoking));
2006 		if ((revoking & not) == 0) {
2007 			*got = need | (have & want);
2008 			__take_cap_refs(ci, *got);
2009 			ret = 1;
2010 		}
2011 	} else {
2012 		dout("get_cap_refs %p have %s needed %s\n", inode,
2013 		     ceph_cap_string(have), ceph_cap_string(need));
2014 	}
2015 out:
2016 	spin_unlock(&inode->i_lock);
2017 	dout("get_cap_refs %p ret %d got %s\n", inode,
2018 	     ret, ceph_cap_string(*got));
2019 	return ret;
2020 }
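
/*
 * Worked example for the (revoking & not) test in try_get_cap_refs()
 * above (editor's sketch): a sync write needs Fw and wants Fb, while
 * the MDS is revoking Fb:
 *
 *	have        = Fw       (Fb already dropped from issued)
 *	implemented = Fw|Fb    (Fb still covers buffered data)
 *	need        = Fw, want = Fw|Fb
 *	not         = want & ~(have & need) = Fb
 *	revoking    = implemented & ~have   = Fb
 *
 * (revoking & not) == Fb != 0, so we keep waiting: the new sync
 * write may not proceed until the buffered writeback under Fb
 * finishes and the revocation is acked.
 */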
2021 
2022 /*
2023  * Check the offset we are writing up to against our current
2024  * max_size.  If necessary, tell the MDS we want to write to
2025  * a larger offset.
2026  */
2027 static void check_max_size(struct inode *inode, loff_t endoff)
2028 {
2029 	struct ceph_inode_info *ci = ceph_inode(inode);
2030 	int check = 0;
2031 
2032 	/* do we need to explicitly request a larger max_size? */
2033 	spin_lock(&inode->i_lock);
2034 	if ((endoff >= ci->i_max_size ||
2035 	     endoff > (inode->i_size << 1)) &&
2036 	    endoff > ci->i_wanted_max_size) {
2037 		dout("write %p at large endoff %llu, req max_size\n",
2038 		     inode, endoff);
2039 		ci->i_wanted_max_size = endoff;
2040 		check = 1;
2041 	}
2042 	spin_unlock(&inode->i_lock);
2043 	if (check)
2044 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2045 }
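
/*
 * Numeric illustration for check_max_size() above (editor's sketch):
 * with i_size = 4 MB, i_max_size = 8 MB and i_wanted_max_size = 0, a
 * write ending at endoff = 9 MB satisfies endoff >= i_max_size, so
 * we set i_wanted_max_size = 9 MB and ask the MDS (via
 * ceph_check_caps) for a larger max_size before the write proceeds.
 */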
2046 
2047 /*
2048  * Wait for caps, and take cap references.  If we can't get a WR cap
2049  * due to a small max_size, make sure we check_max_size (and possibly
2050  * ask the mds) so we don't get hung up indefinitely.
2051  */
2052 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2053 		  loff_t endoff)
2054 {
2055 	int check_max, ret, err;
2056 
2057 retry:
2058 	if (endoff > 0)
2059 		check_max_size(&ci->vfs_inode, endoff);
2060 	check_max = 0;
2061 	err = 0;
2062 	ret = wait_event_interruptible(ci->i_cap_wq,
2063 				       try_get_cap_refs(ci, need, want,
2064 							got, endoff,
2065 							&check_max, &err));
2066 	if (err)
2067 		ret = err;
2068 	if (check_max)
2069 		goto retry;
2070 	return ret;
2071 }
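
/*
 * Hypothetical caller sketch for ceph_get_caps() (editor's example;
 * the surrounding identifiers are made up for illustration):
 *
 *	int got = 0;
 *	int err = ceph_get_caps(ci, CEPH_CAP_FILE_WR,
 *				CEPH_CAP_FILE_BUFFER, &got, endoff);
 *	if (err < 0)
 *		return err;
 *	...perform the write covered by got...
 *	ceph_put_cap_refs(ci, got);
 *
 * A return of 0 means the refs in got were taken; every bit in it
 * carries a reference that must be returned via ceph_put_cap_refs().
 */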
2072 
2073 /*
2074  * Take cap refs.  The caller must already hold at least one ref
2075  * on the caps in question; otherwise taking more refs is not safe.
2076  */
2077 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2078 {
2079 	spin_lock(&ci->vfs_inode.i_lock);
2080 	__take_cap_refs(ci, caps);
2081 	spin_unlock(&ci->vfs_inode.i_lock);
2082 }
2083 
2084 /*
2085  * Release cap refs.
2086  *
2087  * If we released the last ref on any given cap, call ceph_check_caps
2088  * to release (or schedule a release).
2089  *
2090  * If we are releasing a WR cap (from a sync write), finalize any affected
2091  * cap_snap, and wake up any waiters.
2092  */
2093 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2094 {
2095 	struct inode *inode = &ci->vfs_inode;
2096 	int last = 0, put = 0, flushsnaps = 0, wake = 0;
2097 	struct ceph_cap_snap *capsnap;
2098 
2099 	spin_lock(&inode->i_lock);
2100 	if (had & CEPH_CAP_PIN)
2101 		--ci->i_pin_ref;
2102 	if (had & CEPH_CAP_FILE_RD)
2103 		if (--ci->i_rd_ref == 0)
2104 			last++;
2105 	if (had & CEPH_CAP_FILE_CACHE)
2106 		if (--ci->i_rdcache_ref == 0)
2107 			last++;
2108 	if (had & CEPH_CAP_FILE_BUFFER) {
2109 		if (--ci->i_wrbuffer_ref == 0) {
2110 			last++;
2111 			put++;
2112 		}
2113 		dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
2114 		     inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
2115 	}
2116 	if (had & CEPH_CAP_FILE_WR)
2117 		if (--ci->i_wr_ref == 0) {
2118 			last++;
2119 			if (!list_empty(&ci->i_cap_snaps)) {
2120 				capsnap = list_first_entry(&ci->i_cap_snaps,
2121 						     struct ceph_cap_snap,
2122 						     ci_item);
2123 				if (capsnap->writing) {
2124 					capsnap->writing = 0;
2125 					flushsnaps =
2126 						__ceph_finish_cap_snap(ci,
2127 								       capsnap);
2128 					wake = 1;
2129 				}
2130 			}
2131 		}
2132 	spin_unlock(&inode->i_lock);
2133 
2134 	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2135 	     last ? " last" : "", put ? " put" : "");
2136 
2137 	if (last && !flushsnaps)
2138 		ceph_check_caps(ci, 0, NULL);
2139 	else if (flushsnaps)
2140 		ceph_flush_snaps(ci);
2141 	if (wake)
2142 		wake_up(&ci->i_cap_wq);
2143 	if (put)
2144 		iput(inode);
2145 }
2146 
2147 /*
2148  * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2149  * context.  Adjust per-snap dirty page accounting as appropriate.
2150  * Once all dirty data for a cap_snap is flushed, flush snapped file
2151  * metadata back to the MDS.  If we dropped the last ref, call
2152  * ceph_check_caps.
2153  */
2154 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2155 				struct ceph_snap_context *snapc)
2156 {
2157 	struct inode *inode = &ci->vfs_inode;
2158 	int last = 0;
2159 	int complete_capsnap = 0;
2160 	int drop_capsnap = 0;
2161 	int found = 0;
2162 	struct ceph_cap_snap *capsnap = NULL;
2163 
2164 	spin_lock(&inode->i_lock);
2165 	ci->i_wrbuffer_ref -= nr;
2166 	last = !ci->i_wrbuffer_ref;
2167 
2168 	if (ci->i_head_snapc == snapc) {
2169 		ci->i_wrbuffer_ref_head -= nr;
2170 		if (!ci->i_wrbuffer_ref_head) {
2171 			ceph_put_snap_context(ci->i_head_snapc);
2172 			ci->i_head_snapc = NULL;
2173 		}
2174 		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2175 		     inode,
2176 		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2177 		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2178 		     last ? " LAST" : "");
2179 	} else {
2180 		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2181 			if (capsnap->context == snapc) {
2182 				found = 1;
2183 				break;
2184 			}
2185 		}
2186 		BUG_ON(!found);
2187 		capsnap->dirty_pages -= nr;
2188 		if (capsnap->dirty_pages == 0) {
2189 			complete_capsnap = 1;
2190 			if (capsnap->dirty == 0)
2191 				/* cap writeback completed before we created
2192 				 * the cap_snap; no FLUSHSNAP is needed */
2193 				drop_capsnap = 1;
2194 		}
2195 		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2196 		     "snap %lld %d/%d -> %d/%d %s%s%s\n",
2197 		     inode, capsnap, capsnap->context->seq,
2198 		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2199 		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
2200 		     last ? " (wrbuffer last)" : "",
2201 		     complete_capsnap ? " (complete capsnap)" : "",
2202 		     drop_capsnap ? " (drop capsnap)" : "");
2203 		if (drop_capsnap) {
2204 			ceph_put_snap_context(capsnap->context);
2205 			list_del(&capsnap->ci_item);
2206 			list_del(&capsnap->flushing_item);
2207 			ceph_put_cap_snap(capsnap);
2208 		}
2209 	}
2210 
2211 	spin_unlock(&inode->i_lock);
2212 
2213 	if (last) {
2214 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2215 		iput(inode);
2216 	} else if (complete_capsnap) {
2217 		ceph_flush_snaps(ci);
2218 		wake_up(&ci->i_cap_wq);
2219 	}
2220 	if (drop_capsnap)
2221 		iput(inode);
2222 }
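
/*
 * Editor's note: a typical (hypothetical) caller is the writeback
 * completion path, dropping one WRBUFFER ref per page just written
 * under a given snap context, e.g.:
 *
 *	ceph_put_wrbuffer_cap_refs(ci, nr_pages_written, snapc);
 *
 * If those were the last dirty pages for a cap_snap, this triggers
 * the FLUSHSNAP of the snapped metadata to the MDS.
 */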
2223 
2224 /*
2225  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
2226  * actually be a revocation if it specifies a smaller cap set.)
2227  *
2228  * caller holds s_mutex and i_lock, we drop both.
2229  *
2230  * the local check_caps value chooses the follow-up action:
2231  *  0 - ok, no further action
2232  *  1 - check_caps on auth cap only (writeback)
2233  *  2 - check_caps (ack revoke)
2234  */
2235 static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2236 			     struct ceph_mds_session *session,
2237 			     struct ceph_cap *cap,
2238 			     struct ceph_buffer *xattr_buf)
2239 	__releases(inode->i_lock)
2240 	__releases(session->s_mutex)
2241 {
2242 	struct ceph_inode_info *ci = ceph_inode(inode);
2243 	int mds = session->s_mds;
2244 	int seq = le32_to_cpu(grant->seq);
2245 	int newcaps = le32_to_cpu(grant->caps);
2246 	int issued, implemented, used, wanted, dirty;
2247 	u64 size = le64_to_cpu(grant->size);
2248 	u64 max_size = le64_to_cpu(grant->max_size);
2249 	struct timespec mtime, atime, ctime;
2250 	int check_caps = 0;
2251 	int wake = 0;
2252 	int writeback = 0;
2253 	int revoked_rdcache = 0;
2254 	int queue_invalidate = 0;
2255 
2256 	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2257 	     inode, cap, mds, seq, ceph_cap_string(newcaps));
2258 	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2259 		inode->i_size);
2260 
2261 	/*
2262 	 * If CACHE is being revoked, and we have no dirty buffers,
2263 	 * try to invalidate (once).  (If there are dirty buffers, we
2264 	 * will invalidate _after_ writeback.)
2265 	 */
2266 	if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2267 	    !ci->i_wrbuffer_ref) {
2268 		if (try_nonblocking_invalidate(inode) == 0) {
2269 			revoked_rdcache = 1;
2270 		} else {
2271 			/* there were locked pages... invalidate later
2272 			   in a separate thread. */
2273 			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2274 				queue_invalidate = 1;
2275 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
2276 			}
2277 		}
2278 	}
2279 
2280 	/* side effects now are allowed */
2281 
2282 	issued = __ceph_caps_issued(ci, &implemented);
2283 	issued |= implemented | __ceph_caps_dirty(ci);
2284 
2285 	cap->cap_gen = session->s_cap_gen;
2286 
2287 	__check_cap_issue(ci, cap, newcaps);
2288 
2289 	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2290 		inode->i_mode = le32_to_cpu(grant->mode);
2291 		inode->i_uid = le32_to_cpu(grant->uid);
2292 		inode->i_gid = le32_to_cpu(grant->gid);
2293 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2294 		     inode->i_uid, inode->i_gid);
2295 	}
2296 
2297 	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
2298 		inode->i_nlink = le32_to_cpu(grant->nlink);
2299 
2300 	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2301 		int len = le32_to_cpu(grant->xattr_len);
2302 		u64 version = le64_to_cpu(grant->xattr_version);
2303 
2304 		if (version > ci->i_xattrs.version) {
2305 			dout(" got new xattrs v%llu on %p len %d\n",
2306 			     version, inode, len);
2307 			if (ci->i_xattrs.blob)
2308 				ceph_buffer_put(ci->i_xattrs.blob);
2309 			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2310 			ci->i_xattrs.version = version;
2311 		}
2312 	}
2313 
2314 	/* size/ctime/mtime/atime? */
2315 	ceph_fill_file_size(inode, issued,
2316 			    le32_to_cpu(grant->truncate_seq),
2317 			    le64_to_cpu(grant->truncate_size), size);
2318 	ceph_decode_timespec(&mtime, &grant->mtime);
2319 	ceph_decode_timespec(&atime, &grant->atime);
2320 	ceph_decode_timespec(&ctime, &grant->ctime);
2321 	ceph_fill_file_time(inode, issued,
2322 			    le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2323 			    &atime);
2324 
2325 	/* max size increase? */
2326 	if (max_size != ci->i_max_size) {
2327 		dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2328 		ci->i_max_size = max_size;
2329 		if (max_size >= ci->i_wanted_max_size) {
2330 			ci->i_wanted_max_size = 0;  /* reset */
2331 			ci->i_requested_max_size = 0;
2332 		}
2333 		wake = 1;
2334 	}
2335 
2336 	/* check cap bits */
2337 	wanted = __ceph_caps_wanted(ci);
2338 	used = __ceph_caps_used(ci);
2339 	dirty = __ceph_caps_dirty(ci);
2340 	dout(" my wanted = %s, used = %s, dirty %s\n",
2341 	     ceph_cap_string(wanted),
2342 	     ceph_cap_string(used),
2343 	     ceph_cap_string(dirty));
2344 	if (wanted != le32_to_cpu(grant->wanted)) {
2345 		dout("mds wanted %s -> %s\n",
2346 		     ceph_cap_string(le32_to_cpu(grant->wanted)),
2347 		     ceph_cap_string(wanted));
2348 		grant->wanted = cpu_to_le32(wanted);
2349 	}
2350 
2351 	cap->seq = seq;
2352 
2353 	/* file layout may have changed */
2354 	ci->i_layout = grant->layout;
2355 
2356 	/* revocation, grant, or no-op? */
2357 	if (cap->issued & ~newcaps) {
2358 		dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
2359 		     ceph_cap_string(newcaps));
2360 		if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
2361 			writeback = 1; /* will delay ack */
2362 		else if (dirty & ~newcaps)
2363 			check_caps = 1;  /* initiate writeback in check_caps */
2364 		else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
2365 			   revoked_rdcache)
2366 			check_caps = 2;     /* send revoke ack in check_caps */
2367 		cap->issued = newcaps;
2368 		cap->implemented |= newcaps;
2369 	} else if (cap->issued == newcaps) {
2370 		dout("caps unchanged: %s -> %s\n",
2371 		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2372 	} else {
2373 		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2374 		     ceph_cap_string(newcaps));
2375 		cap->issued = newcaps;
2376 		cap->implemented |= newcaps; /* add bits only, to
2377 					      * avoid stepping on a
2378 					      * pending revocation */
2379 		wake = 1;
2380 	}
2381 	BUG_ON(cap->issued & ~cap->implemented);
2382 
2383 	spin_unlock(&inode->i_lock);
2384 	if (writeback)
2385 		/*
2386 		 * queue inode for writeback: we can't actually call
2387 		 * filemap_write_and_wait, etc. from message handler
2388 		 * context.
2389 		 */
2390 		ceph_queue_writeback(inode);
2391 	if (queue_invalidate)
2392 		ceph_queue_invalidate(inode);
2393 	if (wake)
2394 		wake_up(&ci->i_cap_wq);
2395 
2396 	if (check_caps == 1)
2397 		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2398 				session);
2399 	else if (check_caps == 2)
2400 		ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2401 	else
2402 		mutex_unlock(&session->s_mutex);
2403 }
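
/*
 * Worked revocation example for handle_cap_grant() above (editor's
 * sketch): suppose cap->issued = Fc|Fb|Fw and the grant carries
 * newcaps = Fw, so Fc and Fb are being revoked.  If we currently
 * hold buffered dirty data (used includes Fb), writeback is set and
 * the ack is delayed until the flush queued by ceph_queue_writeback()
 * completes; if instead nothing revoked is in use or dirty,
 * check_caps becomes 2 and the revoke is acked promptly via
 * ceph_check_caps().
 */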
2404 
2405 /*
2406  * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2407  * MDS has been safely committed.
2408  */
2409 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2410 				 struct ceph_mds_caps *m,
2411 				 struct ceph_mds_session *session,
2412 				 struct ceph_cap *cap)
2413 	__releases(inode->i_lock)
2414 {
2415 	struct ceph_inode_info *ci = ceph_inode(inode);
2416 	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
2417 	unsigned seq = le32_to_cpu(m->seq);
2418 	int dirty = le32_to_cpu(m->dirty);
2419 	int cleaned = 0;
2420 	int drop = 0;
2421 	int i;
2422 
2423 	for (i = 0; i < CEPH_CAP_BITS; i++)
2424 		if ((dirty & (1 << i)) &&
2425 		    flush_tid == ci->i_cap_flush_tid[i])
2426 			cleaned |= 1 << i;
2427 
2428 	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2429 	     " flushing %s -> %s\n",
2430 	     inode, session->s_mds, seq, ceph_cap_string(dirty),
2431 	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2432 	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2433 
2434 	if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2435 		goto out;
2436 
2437 	ci->i_flushing_caps &= ~cleaned;
2438 
2439 	spin_lock(&mdsc->cap_dirty_lock);
2440 	if (ci->i_flushing_caps == 0) {
2441 		list_del_init(&ci->i_flushing_item);
2442 		if (!list_empty(&session->s_cap_flushing))
2443 			dout(" mds%d still flushing cap on %p\n",
2444 			     session->s_mds,
2445 			     &list_entry(session->s_cap_flushing.next,
2446 					 struct ceph_inode_info,
2447 					 i_flushing_item)->vfs_inode);
2448 		mdsc->num_cap_flushing--;
2449 		wake_up(&mdsc->cap_flushing_wq);
2450 		dout(" inode %p now !flushing\n", inode);
2451 
2452 		if (ci->i_dirty_caps == 0) {
2453 			dout(" inode %p now clean\n", inode);
2454 			BUG_ON(!list_empty(&ci->i_dirty_item));
2455 			drop = 1;
2456 		} else {
2457 			BUG_ON(list_empty(&ci->i_dirty_item));
2458 		}
2459 	}
2460 	spin_unlock(&mdsc->cap_dirty_lock);
2461 	wake_up(&ci->i_cap_wq);
2462 
2463 out:
2464 	spin_unlock(&inode->i_lock);
2465 	if (drop)
2466 		iput(inode);
2467 }
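
/*
 * Worked example for the cleaned computation in
 * handle_cap_flush_ack() above (editor's sketch): if Fw and Fx were
 * flushed with per-bit tids 7 and 9 in i_cap_flush_tid, an ack with
 * flush_tid = 7 and dirty = Fw|Fx sets only the Fw bit in cleaned;
 * the Fx flush is still outstanding and stays in i_flushing_caps
 * until its own ack (tid 9) arrives.
 */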
2468 
2469 /*
2470  * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
2471  * throw away our cap_snap.
2472  *
2473  * Caller hold s_mutex.
2474  */
2475 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2476 				     struct ceph_mds_caps *m,
2477 				     struct ceph_mds_session *session)
2478 {
2479 	struct ceph_inode_info *ci = ceph_inode(inode);
2480 	u64 follows = le64_to_cpu(m->snap_follows);
2481 	struct ceph_cap_snap *capsnap;
2482 	int drop = 0;
2483 
2484 	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2485 	     inode, ci, session->s_mds, follows);
2486 
2487 	spin_lock(&inode->i_lock);
2488 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2489 		if (capsnap->follows == follows) {
2490 			if (capsnap->flush_tid != flush_tid) {
2491 				dout(" cap_snap %p follows %lld tid %lld !="
2492 				     " %lld\n", capsnap, follows,
2493 				     flush_tid, capsnap->flush_tid);
2494 				break;
2495 			}
2496 			WARN_ON(capsnap->dirty_pages || capsnap->writing);
2497 			dout(" removing %p cap_snap %p follows %lld\n",
2498 			     inode, capsnap, follows);
2499 			ceph_put_snap_context(capsnap->context);
2500 			list_del(&capsnap->ci_item);
2501 			list_del(&capsnap->flushing_item);
2502 			ceph_put_cap_snap(capsnap);
2503 			drop = 1;
2504 			break;
2505 		} else {
2506 			dout(" skipping cap_snap %p follows %lld\n",
2507 			     capsnap, capsnap->follows);
2508 		}
2509 	}
2510 	spin_unlock(&inode->i_lock);
2511 	if (drop)
2512 		iput(inode);
2513 }
2514 
2515 /*
2516  * Handle TRUNC from MDS, indicating file truncation.
2517  *
2518  * caller hold s_mutex.
2519  */
2520 static void handle_cap_trunc(struct inode *inode,
2521 			     struct ceph_mds_caps *trunc,
2522 			     struct ceph_mds_session *session)
2523 	__releases(inode->i_lock)
2524 {
2525 	struct ceph_inode_info *ci = ceph_inode(inode);
2526 	int mds = session->s_mds;
2527 	int seq = le32_to_cpu(trunc->seq);
2528 	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2529 	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2530 	u64 size = le64_to_cpu(trunc->size);
2531 	int implemented = 0;
2532 	int dirty = __ceph_caps_dirty(ci);
2533 	int issued = __ceph_caps_issued(ci, &implemented);
2534 	int queue_trunc = 0;
2535 
2536 	issued |= implemented | dirty;
2537 
2538 	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2539 	     inode, mds, seq, truncate_size, truncate_seq);
2540 	queue_trunc = ceph_fill_file_size(inode, issued,
2541 					  truncate_seq, truncate_size, size);
2542 	spin_unlock(&inode->i_lock);
2543 
2544 	if (queue_trunc)
2545 		ceph_queue_vmtruncate(inode);
2546 }
2547 
2548 /*
2549  * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
2550  * different one.  If we are the most recent migration we've seen (as
2551  * indicated by mseq), make note of the migrating cap bits for the
2552  * duration (until we see the corresponding IMPORT).
2553  *
2554  * caller holds s_mutex
2555  */
2556 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2557 			      struct ceph_mds_session *session)
2558 {
2559 	struct ceph_inode_info *ci = ceph_inode(inode);
2560 	int mds = session->s_mds;
2561 	unsigned mseq = le32_to_cpu(ex->migrate_seq);
2562 	struct ceph_cap *cap = NULL, *t;
2563 	struct rb_node *p;
2564 	int remember = 1;
2565 
2566 	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2567 	     inode, ci, mds, mseq);
2568 
2569 	spin_lock(&inode->i_lock);
2570 
2571 	/* make sure we haven't seen a higher mseq */
2572 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2573 		t = rb_entry(p, struct ceph_cap, ci_node);
2574 		if (ceph_seq_cmp(t->mseq, mseq) > 0) {
2575 			dout(" higher mseq on cap from mds%d\n",
2576 			     t->session->s_mds);
2577 			remember = 0;
2578 		}
2579 		if (t->session->s_mds == mds)
2580 			cap = t;
2581 	}
2582 
2583 	if (cap) {
2584 		if (remember) {
2585 			/* make note */
2586 			ci->i_cap_exporting_mds = mds;
2587 			ci->i_cap_exporting_mseq = mseq;
2588 			ci->i_cap_exporting_issued = cap->issued;
2589 		}
2590 		__ceph_remove_cap(cap);
2591 	}
2592 	/* else, we already released it */
2593 
2594 	spin_unlock(&inode->i_lock);
2595 }
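
/*
 * mseq ordering example for handle_cap_export() above (editor's
 * sketch): if another cap on this inode was already imported with
 * mseq 4, a stale EXPORT with mseq 3 arriving late finds a cap whose
 * mseq compares higher, so remember stays 0 and no exporting state
 * is recorded; the cap for the exporting mds is still removed, but
 * no IMPORT bookkeeping is set up for the old migration.
 */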
2596 
2597 /*
2598  * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
2599  * clean them up.
2600  *
2601  * caller holds s_mutex.
2602  */
2603 static void handle_cap_import(struct ceph_mds_client *mdsc,
2604 			      struct inode *inode, struct ceph_mds_caps *im,
2605 			      struct ceph_mds_session *session,
2606 			      void *snaptrace, int snaptrace_len)
2607 {
2608 	struct ceph_inode_info *ci = ceph_inode(inode);
2609 	int mds = session->s_mds;
2610 	unsigned issued = le32_to_cpu(im->caps);
2611 	unsigned wanted = le32_to_cpu(im->wanted);
2612 	unsigned seq = le32_to_cpu(im->seq);
2613 	unsigned mseq = le32_to_cpu(im->migrate_seq);
2614 	u64 realmino = le64_to_cpu(im->realm);
2615 	u64 cap_id = le64_to_cpu(im->cap_id);
2616 
2617 	if (ci->i_cap_exporting_mds >= 0 &&
2618 	    ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
2619 		dout("handle_cap_import inode %p ci %p mds%d mseq %d"
2620 		     " - cleared exporting from mds%d\n",
2621 		     inode, ci, mds, mseq,
2622 		     ci->i_cap_exporting_mds);
2623 		ci->i_cap_exporting_issued = 0;
2624 		ci->i_cap_exporting_mseq = 0;
2625 		ci->i_cap_exporting_mds = -1;
2626 	} else {
2627 		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2628 		     inode, ci, mds, mseq);
2629 	}
2630 
2631 	down_write(&mdsc->snap_rwsem);
2632 	ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
2633 			       false);
2634 	downgrade_write(&mdsc->snap_rwsem);
2635 	ceph_add_cap(inode, session, cap_id, -1,
2636 		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
2637 		     NULL /* no caps context */);
2638 	try_flush_caps(inode, session, NULL);
2639 	up_read(&mdsc->snap_rwsem);
2640 }
2641 
2642 /*
2643  * Handle a caps message from the MDS.
2644  *
2645  * Identify the appropriate session, inode, and call the right handler
2646  * based on the cap op.
2647  */
2648 void ceph_handle_caps(struct ceph_mds_session *session,
2649 		      struct ceph_msg *msg)
2650 {
2651 	struct ceph_mds_client *mdsc = session->s_mdsc;
2652 	struct super_block *sb = mdsc->client->sb;
2653 	struct inode *inode;
2654 	struct ceph_cap *cap;
2655 	struct ceph_mds_caps *h;
2656 	int mds = session->s_mds;
2657 	int op;
2658 	u32 seq;
2659 	struct ceph_vino vino;
2660 	u64 cap_id;
2661 	u64 size, max_size;
2662 	u64 tid;
2663 	void *snaptrace;
2664 
2665 	dout("handle_caps from mds%d\n", mds);
2666 
2667 	/* decode */
2668 	tid = le64_to_cpu(msg->hdr.tid);
2669 	if (msg->front.iov_len < sizeof(*h))
2670 		goto bad;
2671 	h = msg->front.iov_base;
2672 	snaptrace = h + 1;
2673 	op = le32_to_cpu(h->op);
2674 	vino.ino = le64_to_cpu(h->ino);
2675 	vino.snap = CEPH_NOSNAP;
2676 	cap_id = le64_to_cpu(h->cap_id);
2677 	seq = le32_to_cpu(h->seq);
2678 	size = le64_to_cpu(h->size);
2679 	max_size = le64_to_cpu(h->max_size);
2680 
2681 	mutex_lock(&session->s_mutex);
2682 	session->s_seq++;
2683 	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
2684 	     (unsigned)seq);
2685 
2686 	/* lookup ino */
2687 	inode = ceph_find_inode(sb, vino);
2688 	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2689 	     vino.snap, inode);
2690 	if (!inode) {
2691 		dout(" i don't have ino %llx\n", vino.ino);
2692 		goto done;
2693 	}
2694 
2695 	/* these will work even if we don't have a cap yet */
2696 	switch (op) {
2697 	case CEPH_CAP_OP_FLUSHSNAP_ACK:
2698 		handle_cap_flushsnap_ack(inode, tid, h, session);
2699 		goto done;
2700 
2701 	case CEPH_CAP_OP_EXPORT:
2702 		handle_cap_export(inode, h, session);
2703 		goto done;
2704 
2705 	case CEPH_CAP_OP_IMPORT:
2706 		handle_cap_import(mdsc, inode, h, session,
2707 				  snaptrace, le32_to_cpu(h->snap_trace_len));
2708 		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
2709 				session);
2710 		goto done_unlocked;
2711 	}
2712 
2713 	/* the rest require a cap */
2714 	spin_lock(&inode->i_lock);
2715 	cap = __get_cap_for_mds(ceph_inode(inode), mds);
2716 	if (!cap) {
2717 		dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
2718 		     inode, ceph_ino(inode), ceph_snap(inode), mds);
2719 		spin_unlock(&inode->i_lock);
2720 		goto done;
2721 	}
2722 
2723 	/* note that each of these drops i_lock for us */
2724 	switch (op) {
2725 	case CEPH_CAP_OP_REVOKE:
2726 	case CEPH_CAP_OP_GRANT:
2727 		handle_cap_grant(inode, h, session, cap, msg->middle);
2728 		goto done_unlocked;
2729 
2730 	case CEPH_CAP_OP_FLUSH_ACK:
2731 		handle_cap_flush_ack(inode, tid, h, session, cap);
2732 		break;
2733 
2734 	case CEPH_CAP_OP_TRUNC:
2735 		handle_cap_trunc(inode, h, session);
2736 		break;
2737 
2738 	default:
2739 		spin_unlock(&inode->i_lock);
2740 		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2741 		       ceph_cap_op_name(op));
2742 	}
2743 
2744 done:
2745 	mutex_unlock(&session->s_mutex);
2746 done_unlocked:
2747 	if (inode)
2748 		iput(inode);
2749 	return;
2750 
2751 bad:
2752 	pr_err("ceph_handle_caps: corrupt message\n");
2753 	ceph_msg_dump(msg);
2754 	return;
2755 }
2756 
2757 /*
2758  * Delayed work handler to process end of delayed cap release LRU list.
2759  */
2760 void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
2761 {
2762 	struct ceph_inode_info *ci;
2763 	int flags = CHECK_CAPS_NODELAY;
2764 
2765 	dout("check_delayed_caps\n");
2766 	while (1) {
2767 		spin_lock(&mdsc->cap_delay_lock);
2768 		if (list_empty(&mdsc->cap_delay_list))
2769 			break;
2770 		ci = list_first_entry(&mdsc->cap_delay_list,
2771 				      struct ceph_inode_info,
2772 				      i_cap_delay_list);
2773 		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
2774 		    time_before(jiffies, ci->i_hold_caps_max))
2775 			break;
2776 		list_del_init(&ci->i_cap_delay_list);
2777 		spin_unlock(&mdsc->cap_delay_lock);
2778 		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
2779 		ceph_check_caps(ci, flags, NULL);
2780 	}
2781 	spin_unlock(&mdsc->cap_delay_lock);
2782 }
2783 
2784 /*
2785  * Flush all dirty caps to the mds
2786  */
2787 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2788 {
2789 	struct ceph_inode_info *ci, *nci = NULL;
2790 	struct inode *inode, *ninode = NULL;
2791 	struct list_head *p, *n;
2792 
2793 	dout("flush_dirty_caps\n");
2794 	spin_lock(&mdsc->cap_dirty_lock);
2795 	list_for_each_safe(p, n, &mdsc->cap_dirty) {
2796 		if (nci) {
2797 			ci = nci;
2798 			inode = ninode;
2799 			ci->i_ceph_flags &= ~CEPH_I_NOFLUSH;
2800 			dout("flush_dirty_caps inode %p (was next inode)\n",
2801 			     inode);
2802 		} else {
2803 			ci = list_entry(p, struct ceph_inode_info,
2804 					i_dirty_item);
2805 			inode = igrab(&ci->vfs_inode);
2806 			BUG_ON(!inode);
2807 			dout("flush_dirty_caps inode %p\n", inode);
2808 		}
2809 		if (n != &mdsc->cap_dirty) {
2810 			nci = list_entry(n, struct ceph_inode_info,
2811 					 i_dirty_item);
2812 			ninode = igrab(&nci->vfs_inode);
2813 			BUG_ON(!ninode);
2814 			nci->i_ceph_flags |= CEPH_I_NOFLUSH;
2815 			dout("flush_dirty_caps next inode %p, noflush\n",
2816 			     ninode);
2817 		} else {
2818 			nci = NULL;
2819 			ninode = NULL;
2820 		}
2821 		spin_unlock(&mdsc->cap_dirty_lock);
2822 		if (inode) {
2823 			ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
2824 					NULL);
2825 			iput(inode);
2826 		}
2827 		spin_lock(&mdsc->cap_dirty_lock);
2828 	}
2829 	spin_unlock(&mdsc->cap_dirty_lock);
2830 }
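
/*
 * Editor's note on the loop above: the next list entry is pinned
 * with igrab() and marked CEPH_I_NOFLUSH before cap_dirty_lock is
 * dropped, which appears to keep that inode alive and on cap_dirty
 * (try_flush_caps skips I_NOFLUSH inodes) while ceph_check_caps()
 * runs without the lock; the flag is cleared when the iteration
 * reaches that inode.
 */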
2831 
2832 /*
2833  * Drop open file reference.  If we were the last open file,
2834  * we may need to release capabilities to the MDS (or schedule
2835  * their delayed release).
2836  */
2837 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2838 {
2839 	struct inode *inode = &ci->vfs_inode;
2840 	int last = 0;
2841 
2842 	spin_lock(&inode->i_lock);
2843 	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
2844 	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
2845 	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
2846 	if (--ci->i_nr_by_mode[fmode] == 0)
2847 		last++;
2848 	spin_unlock(&inode->i_lock);
2849 
2850 	if (last && ci->i_vino.snap == CEPH_NOSNAP)
2851 		ceph_check_caps(ci, 0, NULL);
2852 }
2853 
2854 /*
2855  * Helpers for embedding cap and dentry lease releases into mds
2856  * requests.
2857  *
2858  * @force is used by dentry_release (below) to force inclusion of a
2859  * record for the directory inode, even when there aren't any caps to
2860  * drop.
2861  */
2862 int ceph_encode_inode_release(void **p, struct inode *inode,
2863 			      int mds, int drop, int unless, int force)
2864 {
2865 	struct ceph_inode_info *ci = ceph_inode(inode);
2866 	struct ceph_cap *cap;
2867 	struct ceph_mds_request_release *rel = *p;
2868 	int ret = 0;
2869 	int used = 0;
2870 
2871 	spin_lock(&inode->i_lock);
2872 	used = __ceph_caps_used(ci);
2873 
2874 	dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode,
2875 	     mds, ceph_cap_string(used), ceph_cap_string(drop),
2876 	     ceph_cap_string(unless));
2877 
2878 	/* only drop unused caps */
2879 	drop &= ~used;
2880 
2881 	cap = __get_cap_for_mds(ci, mds);
2882 	if (cap && __cap_is_valid(cap)) {
2883 		if (force ||
2884 		    ((cap->issued & drop) &&
2885 		     (cap->issued & unless) == 0)) {
2886 			if ((cap->issued & drop) &&
2887 			    (cap->issued & unless) == 0) {
2888 				dout("encode_inode_release %p cap %p %s -> "
2889 				     "%s\n", inode, cap,
2890 				     ceph_cap_string(cap->issued),
2891 				     ceph_cap_string(cap->issued & ~drop));
2892 				cap->issued &= ~drop;
2893 				cap->implemented &= ~drop;
2894 				if (ci->i_ceph_flags & CEPH_I_NODELAY) {
2895 					int wanted = __ceph_caps_wanted(ci);
2896 					dout("  wanted %s -> %s (act %s)\n",
2897 					     ceph_cap_string(cap->mds_wanted),
2898 					     ceph_cap_string(cap->mds_wanted &
2899 							     ~wanted),
2900 					     ceph_cap_string(wanted));
2901 					cap->mds_wanted &= wanted;
2902 				}
2903 			} else {
2904 				dout("encode_inode_release %p cap %p %s"
2905 				     " (force)\n", inode, cap,
2906 				     ceph_cap_string(cap->issued));
2907 			}
2908 
2909 			rel->ino = cpu_to_le64(ceph_ino(inode));
2910 			rel->cap_id = cpu_to_le64(cap->cap_id);
2911 			rel->seq = cpu_to_le32(cap->seq);
2912 	rel->issue_seq = cpu_to_le32(cap->issue_seq);
2913 			rel->mseq = cpu_to_le32(cap->mseq);
2914 			rel->caps = cpu_to_le32(cap->issued);
2915 			rel->wanted = cpu_to_le32(cap->mds_wanted);
2916 			rel->dname_len = 0;
2917 			rel->dname_seq = 0;
2918 			*p += sizeof(*rel);
2919 			ret = 1;
2920 		} else {
2921 			dout("encode_inode_release %p cap %p %s\n",
2922 			     inode, cap, ceph_cap_string(cap->issued));
2923 		}
2924 	}
2925 	spin_unlock(&inode->i_lock);
2926 	return ret;
2927 }
2928 
2929 int ceph_encode_dentry_release(void **p, struct dentry *dentry,
2930 			       int mds, int drop, int unless)
2931 {
2932 	struct inode *dir = dentry->d_parent->d_inode;
2933 	struct ceph_mds_request_release *rel = *p;
2934 	struct ceph_dentry_info *di = ceph_dentry(dentry);
2935 	int force = 0;
2936 	int ret;
2937 
2938 	/*
2939 	 * force a record for the directory caps if we have a dentry lease.
2940 	 * this is racy (can't take i_lock and d_lock together), but it
2941 	 * doesn't have to be perfect; the mds will revoke anything we don't
2942 	 * release.
2943 	 */
2944 	spin_lock(&dentry->d_lock);
2945 	if (di->lease_session && di->lease_session->s_mds == mds)
2946 		force = 1;
2947 	spin_unlock(&dentry->d_lock);
2948 
2949 	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
2950 
2951 	spin_lock(&dentry->d_lock);
2952 	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
2953 		dout("encode_dentry_release %p mds%d seq %d\n",
2954 		     dentry, mds, (int)di->lease_seq);
2955 		rel->dname_len = cpu_to_le32(dentry->d_name.len);
2956 		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
2957 		*p += dentry->d_name.len;
2958 		rel->dname_seq = cpu_to_le32(di->lease_seq);
2959 	}
2960 	spin_unlock(&dentry->d_lock);
2961 	return ret;
2962 }
2963