xref: /linux/net/sunrpc/cache.c (revision 277f27e2f27752cd1a7901443d72e908ddea8a2e)
1ddc64d0aSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * net/sunrpc/cache.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Generic code for various authentication-related caches
61da177e4SLinus Torvalds  * used by sunrpc clients and servers.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
91da177e4SLinus Torvalds  */
101da177e4SLinus Torvalds 
111da177e4SLinus Torvalds #include <linux/types.h>
121da177e4SLinus Torvalds #include <linux/fs.h>
131da177e4SLinus Torvalds #include <linux/file.h>
141da177e4SLinus Torvalds #include <linux/slab.h>
151da177e4SLinus Torvalds #include <linux/signal.h>
161da177e4SLinus Torvalds #include <linux/sched.h>
171da177e4SLinus Torvalds #include <linux/kmod.h>
181da177e4SLinus Torvalds #include <linux/list.h>
191da177e4SLinus Torvalds #include <linux/module.h>
201da177e4SLinus Torvalds #include <linux/ctype.h>
211b2e122dSAndy Shevchenko #include <linux/string_helpers.h>
227c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
231da177e4SLinus Torvalds #include <linux/poll.h>
241da177e4SLinus Torvalds #include <linux/seq_file.h>
251da177e4SLinus Torvalds #include <linux/proc_fs.h>
261da177e4SLinus Torvalds #include <linux/net.h>
271da177e4SLinus Torvalds #include <linux/workqueue.h>
284a3e2f71SArjan van de Ven #include <linux/mutex.h>
29da77005fSTrond Myklebust #include <linux/pagemap.h>
301da177e4SLinus Torvalds #include <asm/ioctls.h>
311da177e4SLinus Torvalds #include <linux/sunrpc/types.h>
321da177e4SLinus Torvalds #include <linux/sunrpc/cache.h>
331da177e4SLinus Torvalds #include <linux/sunrpc/stats.h>
348854e82dSTrond Myklebust #include <linux/sunrpc/rpc_pipe_fs.h>
354f42d0d5SPavel Emelyanov #include "netns.h"
361da177e4SLinus Torvalds 
371da177e4SLinus Torvalds #define	 RPCDBG_FACILITY RPCDBG_CACHE
381da177e4SLinus Torvalds 
39d76d1815SJ. Bruce Fields static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
401da177e4SLinus Torvalds static void cache_revisit_request(struct cache_head *item);
411da177e4SLinus Torvalds 
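/*
 * Initialise a freshly allocated cache_head: empty hash linkage, a single
 * reference, and a short CACHE_NEW_EXPIRY lifetime.  last_refresh is pushed
 * past any pending flush_time so the new entry is not discarded before it
 * has had a chance to be filled in.
 */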
4277862036SNeil Brown static void cache_init(struct cache_head *h, struct cache_detail *detail)
431da177e4SLinus Torvalds {
44f559935eSArnd Bergmann 	time64_t now = seconds_since_boot();
45129e5824SKinglong Mee 	INIT_HLIST_NODE(&h->cache_list);
461da177e4SLinus Torvalds 	h->flags = 0;
47baab935fSNeilBrown 	kref_init(&h->ref);
481da177e4SLinus Torvalds 	h->expiry_time = now + CACHE_NEW_EXPIRY;
4977862036SNeil Brown 	if (now <= detail->flush_time)
5077862036SNeil Brown 		/* ensure it isn't already expired */
5177862036SNeil Brown 		now = detail->flush_time + 1;
521da177e4SLinus Torvalds 	h->last_refresh = now;
531da177e4SLinus Torvalds }
541da177e4SLinus Torvalds 
554ecd55eaSVasily Averin static void cache_fresh_unlocked(struct cache_head *head,
564ecd55eaSVasily Averin 				struct cache_detail *detail);
574ecd55eaSVasily Averin 
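/*
 * Look up 'key' in the bucket 'hash' under rcu_read_lock().  Entries that
 * match but are both VALID and expired are skipped; on success a counted
 * reference to the entry is returned, otherwise NULL.
 */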
58ae74136bSTrond Myklebust static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
59ae74136bSTrond Myklebust 						struct cache_head *key,
60ae74136bSTrond Myklebust 						int hash)
61ae74136bSTrond Myklebust {
62ae74136bSTrond Myklebust 	struct hlist_head *head = &detail->hash_table[hash];
63ae74136bSTrond Myklebust 	struct cache_head *tmp;
64ae74136bSTrond Myklebust 
65ae74136bSTrond Myklebust 	rcu_read_lock();
66ae74136bSTrond Myklebust 	hlist_for_each_entry_rcu(tmp, head, cache_list) {
67*277f27e2STrond Myklebust 		if (!detail->match(tmp, key))
68*277f27e2STrond Myklebust 			continue;
69*277f27e2STrond Myklebust 		if (test_bit(CACHE_VALID, &tmp->flags) &&
70*277f27e2STrond Myklebust 		    cache_is_expired(detail, tmp))
71ae74136bSTrond Myklebust 			continue;
72ae74136bSTrond Myklebust 		tmp = cache_get_rcu(tmp);
73ae74136bSTrond Myklebust 		rcu_read_unlock();
74ae74136bSTrond Myklebust 		return tmp;
75ae74136bSTrond Myklebust 	}
76ae74136bSTrond Myklebust 	rcu_read_unlock();
77ae74136bSTrond Myklebust 	return NULL;
78ae74136bSTrond Myklebust }
79ae74136bSTrond Myklebust 
80809fe3c5STrond Myklebust static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
81809fe3c5STrond Myklebust 					    struct cache_detail *cd)
82809fe3c5STrond Myklebust {
83809fe3c5STrond Myklebust 	/* Must be called under cd->hash_lock */
84809fe3c5STrond Myklebust 	hlist_del_init_rcu(&ch->cache_list);
85809fe3c5STrond Myklebust 	set_bit(CACHE_CLEANED, &ch->flags);
86809fe3c5STrond Myklebust 	cd->entries--;

87809fe3c5STrond Myklebust }
88809fe3c5STrond Myklebust 
89809fe3c5STrond Myklebust static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
90809fe3c5STrond Myklebust 					  struct cache_detail *cd)
91809fe3c5STrond Myklebust {
92809fe3c5STrond Myklebust 	cache_fresh_unlocked(ch, cd);
93809fe3c5STrond Myklebust 	cache_put(ch, cd);
94809fe3c5STrond Myklebust }
95809fe3c5STrond Myklebust 
96b92a8fabSTrond Myklebust static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
97b92a8fabSTrond Myklebust 						 struct cache_head *key,
98b92a8fabSTrond Myklebust 						 int hash)
99b92a8fabSTrond Myklebust {
100b92a8fabSTrond Myklebust 	struct cache_head *new, *tmp, *freeme = NULL;
101b92a8fabSTrond Myklebust 	struct hlist_head *head = &detail->hash_table[hash];
10215a5f6bdSNeilBrown 
10315a5f6bdSNeilBrown 	new = detail->alloc();
10415a5f6bdSNeilBrown 	if (!new)
10515a5f6bdSNeilBrown 		return NULL;
1062f34931fSNeil Brown 	/* must fully initialise 'new', else
1072f34931fSNeil Brown 	 * we might lose it if we need to
1082f34931fSNeil Brown 	 * cache_put it soon.
1092f34931fSNeil Brown 	 */
11077862036SNeil Brown 	cache_init(new, detail);
1112f34931fSNeil Brown 	detail->init(new, key);
11215a5f6bdSNeilBrown 
1131863d77fSTrond Myklebust 	spin_lock(&detail->hash_lock);
11415a5f6bdSNeilBrown 
11515a5f6bdSNeilBrown 	/* check if entry appeared while we slept */
11651cae673SAmol Grover 	hlist_for_each_entry_rcu(tmp, head, cache_list,
11751cae673SAmol Grover 				 lockdep_is_held(&detail->hash_lock)) {
118*277f27e2STrond Myklebust 		if (!detail->match(tmp, key))
119*277f27e2STrond Myklebust 			continue;
120*277f27e2STrond Myklebust 		if (test_bit(CACHE_VALID, &tmp->flags) &&
121*277f27e2STrond Myklebust 		    cache_is_expired(detail, tmp)) {
122809fe3c5STrond Myklebust 			sunrpc_begin_cache_remove_entry(tmp, detail);
123d202cce8SNeilBrown 			freeme = tmp;
124d202cce8SNeilBrown 			break;
125d202cce8SNeilBrown 		}
12615a5f6bdSNeilBrown 		cache_get(tmp);
1271863d77fSTrond Myklebust 		spin_unlock(&detail->hash_lock);
128baab935fSNeilBrown 		cache_put(new, detail);
12915a5f6bdSNeilBrown 		return tmp;
13015a5f6bdSNeilBrown 	}
131129e5824SKinglong Mee 
132ae74136bSTrond Myklebust 	hlist_add_head_rcu(&new->cache_list, head);
13315a5f6bdSNeilBrown 	detail->entries++;
13415a5f6bdSNeilBrown 	cache_get(new);
1351863d77fSTrond Myklebust 	spin_unlock(&detail->hash_lock);
13615a5f6bdSNeilBrown 
137809fe3c5STrond Myklebust 	if (freeme)
138809fe3c5STrond Myklebust 		sunrpc_end_cache_remove_entry(freeme, detail);
13915a5f6bdSNeilBrown 	return new;
14015a5f6bdSNeilBrown }
14115a5f6bdSNeilBrown 
142ae74136bSTrond Myklebust struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
143ae74136bSTrond Myklebust 					   struct cache_head *key, int hash)
144ae74136bSTrond Myklebust {
145ae74136bSTrond Myklebust 	struct cache_head *ret;
146ae74136bSTrond Myklebust 
147ae74136bSTrond Myklebust 	ret = sunrpc_cache_find_rcu(detail, key, hash);
148ae74136bSTrond Myklebust 	if (ret)
149ae74136bSTrond Myklebust 		return ret;
150ae74136bSTrond Myklebust 	/* Didn't find anything, insert an empty entry */
151ae74136bSTrond Myklebust 	return sunrpc_cache_add_entry(detail, key, hash);
152ae74136bSTrond Myklebust }
153ae74136bSTrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
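
/*
 * Illustrative sketch (not part of this file): a cache implementation
 * typically embeds a cache_head in its own entry type, fills in a key-only
 * instance and then recovers its entry with container_of().  The names
 * "mycache_entry", "mycache_lookup" and "mycache_hash" below are hypothetical.
 *
 *	struct mycache_entry {
 *		struct cache_head h;
 *		//...key and content fields...
 *	};
 *
 *	static struct mycache_entry *mycache_lookup(struct cache_detail *cd,
 *						    struct mycache_entry *key)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup_rcu(cd, &key->h, mycache_hash(key));
 *		if (!ch)
 *			return NULL;
 *		return container_of(ch, struct mycache_entry, h);
 *	}
 */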
154ae74136bSTrond Myklebust 
155f866a819SNeilBrown static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
156ebd0cb1aSNeilBrown 
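/*
 * cache_fresh_locked() and cache_fresh_unlocked() together publish a new
 * state for an entry: the locked part (called under hash_lock) updates the
 * expiry and refresh times and sets CACHE_VALID; the unlocked part clears
 * CACHE_PENDING, wakes any deferred requests and removes any queued upcall
 * for the entry.
 */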
157f559935eSArnd Bergmann static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
15877862036SNeil Brown 			       struct cache_detail *detail)
159ebd0cb1aSNeilBrown {
160f559935eSArnd Bergmann 	time64_t now = seconds_since_boot();
16177862036SNeil Brown 	if (now <= detail->flush_time)
16277862036SNeil Brown 		/* ensure it isn't immediately treated as expired */
16377862036SNeil Brown 		now = detail->flush_time + 1;
164ebd0cb1aSNeilBrown 	head->expiry_time = expiry;
16577862036SNeil Brown 	head->last_refresh = now;
166fdef7aa5SJ. Bruce Fields 	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
167908329f2SNeilBrown 	set_bit(CACHE_VALID, &head->flags);
168ebd0cb1aSNeilBrown }
169ebd0cb1aSNeilBrown 
170ebd0cb1aSNeilBrown static void cache_fresh_unlocked(struct cache_head *head,
171908329f2SNeilBrown 				 struct cache_detail *detail)
172ebd0cb1aSNeilBrown {
173ebd0cb1aSNeilBrown 	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
174ebd0cb1aSNeilBrown 		cache_revisit_request(head);
175f866a819SNeilBrown 		cache_dequeue(detail, head);
176ebd0cb1aSNeilBrown 	}
177ebd0cb1aSNeilBrown }
178ebd0cb1aSNeilBrown 
17915a5f6bdSNeilBrown struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
18015a5f6bdSNeilBrown 				       struct cache_head *new, struct cache_head *old, int hash)
18115a5f6bdSNeilBrown {
18215a5f6bdSNeilBrown 	/* The 'old' entry is to be replaced by 'new'.
18315a5f6bdSNeilBrown 	 * If 'old' is not VALID, we update it directly,
18415a5f6bdSNeilBrown 	 * otherwise we need to replace it
18515a5f6bdSNeilBrown 	 */
18615a5f6bdSNeilBrown 	struct cache_head *tmp;
18715a5f6bdSNeilBrown 
18815a5f6bdSNeilBrown 	if (!test_bit(CACHE_VALID, &old->flags)) {
1891863d77fSTrond Myklebust 		spin_lock(&detail->hash_lock);
19015a5f6bdSNeilBrown 		if (!test_bit(CACHE_VALID, &old->flags)) {
19115a5f6bdSNeilBrown 			if (test_bit(CACHE_NEGATIVE, &new->flags))
19215a5f6bdSNeilBrown 				set_bit(CACHE_NEGATIVE, &old->flags);
19315a5f6bdSNeilBrown 			else
19415a5f6bdSNeilBrown 				detail->update(old, new);
19577862036SNeil Brown 			cache_fresh_locked(old, new->expiry_time, detail);
1961863d77fSTrond Myklebust 			spin_unlock(&detail->hash_lock);
197908329f2SNeilBrown 			cache_fresh_unlocked(old, detail);
19815a5f6bdSNeilBrown 			return old;
19915a5f6bdSNeilBrown 		}
2001863d77fSTrond Myklebust 		spin_unlock(&detail->hash_lock);
20115a5f6bdSNeilBrown 	}
20215a5f6bdSNeilBrown 	/* We need to insert a new entry */
20315a5f6bdSNeilBrown 	tmp = detail->alloc();
20415a5f6bdSNeilBrown 	if (!tmp) {
205baab935fSNeilBrown 		cache_put(old, detail);
20615a5f6bdSNeilBrown 		return NULL;
20715a5f6bdSNeilBrown 	}
20877862036SNeil Brown 	cache_init(tmp, detail);
20915a5f6bdSNeilBrown 	detail->init(tmp, old);
21015a5f6bdSNeilBrown 
2111863d77fSTrond Myklebust 	spin_lock(&detail->hash_lock);
21215a5f6bdSNeilBrown 	if (test_bit(CACHE_NEGATIVE, &new->flags))
21315a5f6bdSNeilBrown 		set_bit(CACHE_NEGATIVE, &tmp->flags);
21415a5f6bdSNeilBrown 	else
21515a5f6bdSNeilBrown 		detail->update(tmp, new);
216129e5824SKinglong Mee 	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
217f2d39586SNeilBrown 	detail->entries++;
21815a5f6bdSNeilBrown 	cache_get(tmp);
21977862036SNeil Brown 	cache_fresh_locked(tmp, new->expiry_time, detail);
22077862036SNeil Brown 	cache_fresh_locked(old, 0, detail);
2211863d77fSTrond Myklebust 	spin_unlock(&detail->hash_lock);
222908329f2SNeilBrown 	cache_fresh_unlocked(tmp, detail);
223908329f2SNeilBrown 	cache_fresh_unlocked(old, detail);
224baab935fSNeilBrown 	cache_put(old, detail);
22515a5f6bdSNeilBrown 	return tmp;
22615a5f6bdSNeilBrown }
22724c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_update);
2281da177e4SLinus Torvalds 
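/*
 * Classify an entry: -EAGAIN if it has not been filled in yet (an upcall is
 * needed), -ENOENT if it is a negative entry, 0 if its contents can be used.
 * The read barrier pairs with the smp_wmb() in cache_fresh_locked().
 */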
229b6040f97Schaoting fan static inline int cache_is_valid(struct cache_head *h)
230989a19b9SNeilBrown {
231d202cce8SNeilBrown 	if (!test_bit(CACHE_VALID, &h->flags))
232989a19b9SNeilBrown 		return -EAGAIN;
233989a19b9SNeilBrown 	else {
234989a19b9SNeilBrown 		/* entry is valid */
235989a19b9SNeilBrown 		if (test_bit(CACHE_NEGATIVE, &h->flags))
236989a19b9SNeilBrown 			return -ENOENT;
237fdef7aa5SJ. Bruce Fields 		else {
238fdef7aa5SJ. Bruce Fields 			/*
239fdef7aa5SJ. Bruce Fields 			 * In combination with write barrier in
240fdef7aa5SJ. Bruce Fields 			 * sunrpc_cache_update, ensures that anyone
241fdef7aa5SJ. Bruce Fields 			 * using the cache entry after this sees the
242fdef7aa5SJ. Bruce Fields 			 * updated contents:
243fdef7aa5SJ. Bruce Fields 			 */
244fdef7aa5SJ. Bruce Fields 			smp_rmb();
245989a19b9SNeilBrown 			return 0;
246989a19b9SNeilBrown 		}
247989a19b9SNeilBrown 	}
248fdef7aa5SJ. Bruce Fields }
249e9dc1221SJ. Bruce Fields 
2506bab93f8SJ. Bruce Fields static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
2516bab93f8SJ. Bruce Fields {
2526bab93f8SJ. Bruce Fields 	int rv;
2536bab93f8SJ. Bruce Fields 
2541863d77fSTrond Myklebust 	spin_lock(&detail->hash_lock);
255b6040f97Schaoting fan 	rv = cache_is_valid(h);
2562a1c7f53SNeilBrown 	if (rv == -EAGAIN) {
2576bab93f8SJ. Bruce Fields 		set_bit(CACHE_NEGATIVE, &h->flags);
25877862036SNeil Brown 		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
25977862036SNeil Brown 				   detail);
2602a1c7f53SNeilBrown 		rv = -ENOENT;
2612a1c7f53SNeilBrown 	}
2621863d77fSTrond Myklebust 	spin_unlock(&detail->hash_lock);
2636bab93f8SJ. Bruce Fields 	cache_fresh_unlocked(h, detail);
2642a1c7f53SNeilBrown 	return rv;
2656bab93f8SJ. Bruce Fields }
2666bab93f8SJ. Bruce Fields 
2671da177e4SLinus Torvalds /*
2681da177e4SLinus Torvalds  * This is the generic cache management routine for all
2691da177e4SLinus Torvalds  * the authentication caches.
2701da177e4SLinus Torvalds  * It checks the currency of a cache item and will (later)
2711da177e4SLinus Torvalds  * initiate an upcall to fill it if needed.
2721da177e4SLinus Torvalds  *
2731da177e4SLinus Torvalds  *
2741da177e4SLinus Torvalds  * Returns 0 if the cache_head can be used; otherwise cache_puts it and returns
275989a19b9SNeilBrown  * -EAGAIN if an upcall is pending and the request has been queued,
276989a19b9SNeilBrown  * -ETIMEDOUT if the upcall failed, the request could not be queued, or the
277989a19b9SNeilBrown  *           upcall completed but the item is still invalid (implying that
278989a19b9SNeilBrown  *           the cache item has been replaced with a newer one), or
2791da177e4SLinus Torvalds  * -ENOENT if the cache entry was negative.
2801da177e4SLinus Torvalds  */
2811da177e4SLinus Torvalds int cache_check(struct cache_detail *detail,
2821da177e4SLinus Torvalds 		    struct cache_head *h, struct cache_req *rqstp)
2831da177e4SLinus Torvalds {
2841da177e4SLinus Torvalds 	int rv;
285f559935eSArnd Bergmann 	time64_t refresh_age, age;
2861da177e4SLinus Torvalds 
2871da177e4SLinus Torvalds 	/* First decide return status as best we can */
288b6040f97Schaoting fan 	rv = cache_is_valid(h);
2891da177e4SLinus Torvalds 
2901da177e4SLinus Torvalds 	/* now see if we want to start an upcall */
2911da177e4SLinus Torvalds 	refresh_age = (h->expiry_time - h->last_refresh);
292c5b29f88SNeilBrown 	age = seconds_since_boot() - h->last_refresh;
2931da177e4SLinus Torvalds 
2941da177e4SLinus Torvalds 	if (rqstp == NULL) {
2951da177e4SLinus Torvalds 		if (rv == -EAGAIN)
2961da177e4SLinus Torvalds 			rv = -ENOENT;
2970bebc633SNeilBrown 	} else if (rv == -EAGAIN ||
2980bebc633SNeilBrown 		   (h->expiry_time != 0 && age > refresh_age/2)) {
299f559935eSArnd Bergmann 		dprintk("RPC:       Want update, refage=%lld, age=%lld\n",
30046121cf7SChuck Lever 				refresh_age, age);
30165286b88STrond Myklebust 		switch (detail->cache_upcall(detail, h)) {
3021da177e4SLinus Torvalds 		case -EINVAL:
3036bab93f8SJ. Bruce Fields 			rv = try_to_negate_entry(detail, h);
3041da177e4SLinus Torvalds 			break;
3051da177e4SLinus Torvalds 		case -EAGAIN:
3062a1c7f53SNeilBrown 			cache_fresh_unlocked(h, detail);
3071da177e4SLinus Torvalds 			break;
3081da177e4SLinus Torvalds 		}
3091da177e4SLinus Torvalds 	}
3101da177e4SLinus Torvalds 
311989a19b9SNeilBrown 	if (rv == -EAGAIN) {
312d76d1815SJ. Bruce Fields 		if (!cache_defer_req(rqstp, h)) {
313d76d1815SJ. Bruce Fields 			/*
314d76d1815SJ. Bruce Fields 			 * Request was not deferred; handle it as best
315d76d1815SJ. Bruce Fields 			 * we can ourselves:
316d76d1815SJ. Bruce Fields 			 */
317b6040f97Schaoting fan 			rv = cache_is_valid(h);
3181da177e4SLinus Torvalds 			if (rv == -EAGAIN)
319e0bb89efSJ.Bruce Fields 				rv = -ETIMEDOUT;
320989a19b9SNeilBrown 		}
321989a19b9SNeilBrown 	}
3224013edeaSNeilBrown 	if (rv)
323baab935fSNeilBrown 		cache_put(h, detail);
3241da177e4SLinus Torvalds 	return rv;
3251da177e4SLinus Torvalds }
32624c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(cache_check);
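
/*
 * Illustrative sketch (not part of this file): a server-side user normally
 * follows a lookup with cache_check(), passing the svc_rqst's deferral
 * handle so the request can be parked while the upcall completes.  The
 * "mycache_*" names are hypothetical; see the sketch above
 * sunrpc_cache_lookup_rcu().
 *
 *	struct mycache_entry *me = mycache_lookup(cd, &key);
 *
 *	if (!me)
 *		return SVC_DROP;
 *	switch (cache_check(cd, &me->h, &rqstp->rq_chandle)) {
 *	case 0:		break;			// entry valid, ref still held
 *	case -EAGAIN:	return SVC_DROP;	// deferred, retried later
 *	default:	return SVC_DENIED;	// negative or timed out
 *	}
 */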
3271da177e4SLinus Torvalds 
3281da177e4SLinus Torvalds /*
3291da177e4SLinus Torvalds  * caches need to be periodically cleaned.
3301da177e4SLinus Torvalds  * For this we maintain a list of cache_detail and
3311da177e4SLinus Torvalds  * a current pointer into that list and into the table
3321da177e4SLinus Torvalds  * for that entry.
3331da177e4SLinus Torvalds  *
334013920ebSNeilBrown  * Each time cache_clean is called it finds the next non-empty entry
3351da177e4SLinus Torvalds  * in the current table and walks the list in that entry
3361da177e4SLinus Torvalds  * looking for entries that can be removed.
3371da177e4SLinus Torvalds  *
3381da177e4SLinus Torvalds  * An entry gets removed if:
3391da177e4SLinus Torvalds  * - The expiry is before current time
3401da177e4SLinus Torvalds  * - The last_refresh time is before the flush_time for that cache
3411da177e4SLinus Torvalds  *
3421da177e4SLinus Torvalds  * later we might drop old entries with non-NEVER expiry if that table
3431da177e4SLinus Torvalds  * is getting 'full' for some definition of 'full'
3441da177e4SLinus Torvalds  *
3451da177e4SLinus Torvalds  * The question of "how often to scan a table" is an interesting one
3461da177e4SLinus Torvalds  * and is answered in part by the use of the "nextcheck" field in the
3471da177e4SLinus Torvalds  * cache_detail.
3481da177e4SLinus Torvalds  * When a scan of a table begins, the nextcheck field is set to a time
3491da177e4SLinus Torvalds  * that is well into the future.
3501da177e4SLinus Torvalds  * While scanning, if an expiry time is found that is earlier than the
3511da177e4SLinus Torvalds  * current nextcheck time, nextcheck is set to that expiry time.
3521da177e4SLinus Torvalds  * If the flush_time is ever set to a time earlier than the nextcheck
3531da177e4SLinus Torvalds  * time, the nextcheck time is then set to that flush_time.
3541da177e4SLinus Torvalds  *
3551da177e4SLinus Torvalds  * A table is then only scanned if the current time is at least
3561da177e4SLinus Torvalds  * the nextcheck time.
3571da177e4SLinus Torvalds  *
3581da177e4SLinus Torvalds  */
3591da177e4SLinus Torvalds 
3601da177e4SLinus Torvalds static LIST_HEAD(cache_list);
3611da177e4SLinus Torvalds static DEFINE_SPINLOCK(cache_list_lock);
3621da177e4SLinus Torvalds static struct cache_detail *current_detail;
3631da177e4SLinus Torvalds static int current_index;
3641da177e4SLinus Torvalds 
36565f27f38SDavid Howells static void do_cache_clean(struct work_struct *work);
3668eab945cSArtem Bityutskiy static struct delayed_work cache_cleaner;
3671da177e4SLinus Torvalds 
368820f9442SStanislav Kinsbursky void sunrpc_init_cache_detail(struct cache_detail *cd)
3691da177e4SLinus Torvalds {
3701863d77fSTrond Myklebust 	spin_lock_init(&cd->hash_lock);
3711da177e4SLinus Torvalds 	INIT_LIST_HEAD(&cd->queue);
3721da177e4SLinus Torvalds 	spin_lock(&cache_list_lock);
3731da177e4SLinus Torvalds 	cd->nextcheck = 0;
3741da177e4SLinus Torvalds 	cd->entries = 0;
37564a38e84SDave Wysochanski 	atomic_set(&cd->writers, 0);
3761da177e4SLinus Torvalds 	cd->last_close = 0;
3771da177e4SLinus Torvalds 	cd->last_warn = -1;
3781da177e4SLinus Torvalds 	list_add(&cd->others, &cache_list);
3791da177e4SLinus Torvalds 	spin_unlock(&cache_list_lock);
3801da177e4SLinus Torvalds 
3811da177e4SLinus Torvalds 	/* start the cleaning process */
38277b00bc0SKe Wang 	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
3831da177e4SLinus Torvalds }
384820f9442SStanislav Kinsbursky EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
3851da177e4SLinus Torvalds 
386820f9442SStanislav Kinsbursky void sunrpc_destroy_cache_detail(struct cache_detail *cd)
3871da177e4SLinus Torvalds {
3881da177e4SLinus Torvalds 	cache_purge(cd);
3891da177e4SLinus Torvalds 	spin_lock(&cache_list_lock);
3901863d77fSTrond Myklebust 	spin_lock(&cd->hash_lock);
3911da177e4SLinus Torvalds 	if (current_detail == cd)
3921da177e4SLinus Torvalds 		current_detail = NULL;
3931da177e4SLinus Torvalds 	list_del_init(&cd->others);
3941863d77fSTrond Myklebust 	spin_unlock(&cd->hash_lock);
3951da177e4SLinus Torvalds 	spin_unlock(&cache_list_lock);
3961da177e4SLinus Torvalds 	if (list_empty(&cache_list)) {
3971da177e4SLinus Torvalds 		/* module must be being unloaded, so it's safe to kill the worker */
3984011cd97STrond Myklebust 		cancel_delayed_work_sync(&cache_cleaner);
3991da177e4SLinus Torvalds 	}
4001da177e4SLinus Torvalds }
401820f9442SStanislav Kinsbursky EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
4021da177e4SLinus Torvalds 
4031da177e4SLinus Torvalds /* cache_clean() tries to find something to clean
4041da177e4SLinus Torvalds  * and cleans it.
4051da177e4SLinus Torvalds  * It returns 1 if it cleaned something,
4061da177e4SLinus Torvalds  *            0 if it didn't find anything this time
4071da177e4SLinus Torvalds  *           -1 if it fell off the end of the list.
4081da177e4SLinus Torvalds  */
4091da177e4SLinus Torvalds static int cache_clean(void)
4101da177e4SLinus Torvalds {
4111da177e4SLinus Torvalds 	int rv = 0;
4121da177e4SLinus Torvalds 	struct list_head *next;
4131da177e4SLinus Torvalds 
4141da177e4SLinus Torvalds 	spin_lock(&cache_list_lock);
4151da177e4SLinus Torvalds 
4161da177e4SLinus Torvalds 	/* find a suitable table if we don't already have one */
4171da177e4SLinus Torvalds 	while (current_detail == NULL ||
4181da177e4SLinus Torvalds 	    current_index >= current_detail->hash_size) {
4191da177e4SLinus Torvalds 		if (current_detail)
4201da177e4SLinus Torvalds 			next = current_detail->others.next;
4211da177e4SLinus Torvalds 		else
4221da177e4SLinus Torvalds 			next = cache_list.next;
4231da177e4SLinus Torvalds 		if (next == &cache_list) {
4241da177e4SLinus Torvalds 			current_detail = NULL;
4251da177e4SLinus Torvalds 			spin_unlock(&cache_list_lock);
4261da177e4SLinus Torvalds 			return -1;
4271da177e4SLinus Torvalds 		}
4281da177e4SLinus Torvalds 		current_detail = list_entry(next, struct cache_detail, others);
429c5b29f88SNeilBrown 		if (current_detail->nextcheck > seconds_since_boot())
4301da177e4SLinus Torvalds 			current_index = current_detail->hash_size;
4311da177e4SLinus Torvalds 		else {
4321da177e4SLinus Torvalds 			current_index = 0;
433c5b29f88SNeilBrown 			current_detail->nextcheck = seconds_since_boot()+30*60;
4341da177e4SLinus Torvalds 		}
4351da177e4SLinus Torvalds 	}
4361da177e4SLinus Torvalds 
4371da177e4SLinus Torvalds 	/* find a non-empty bucket in the table */
4381da177e4SLinus Torvalds 	while (current_detail &&
4391da177e4SLinus Torvalds 	       current_index < current_detail->hash_size &&
440129e5824SKinglong Mee 	       hlist_empty(&current_detail->hash_table[current_index]))
4411da177e4SLinus Torvalds 		current_index++;
4421da177e4SLinus Torvalds 
4431da177e4SLinus Torvalds 	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
4441da177e4SLinus Torvalds 
4451da177e4SLinus Torvalds 	if (current_detail && current_index < current_detail->hash_size) {
446129e5824SKinglong Mee 		struct cache_head *ch = NULL;
4471da177e4SLinus Torvalds 		struct cache_detail *d;
448129e5824SKinglong Mee 		struct hlist_head *head;
449129e5824SKinglong Mee 		struct hlist_node *tmp;
4501da177e4SLinus Torvalds 
4511863d77fSTrond Myklebust 		spin_lock(&current_detail->hash_lock);
4521da177e4SLinus Torvalds 
4531da177e4SLinus Torvalds 		/* Ok, now to clean this strand */
4541da177e4SLinus Torvalds 
455129e5824SKinglong Mee 		head = &current_detail->hash_table[current_index];
456129e5824SKinglong Mee 		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
4571da177e4SLinus Torvalds 			if (current_detail->nextcheck > ch->expiry_time)
4581da177e4SLinus Torvalds 				current_detail->nextcheck = ch->expiry_time+1;
4592f50d8b6SNeilBrown 			if (!cache_is_expired(current_detail, ch))
4601da177e4SLinus Torvalds 				continue;
4611da177e4SLinus Torvalds 
462809fe3c5STrond Myklebust 			sunrpc_begin_cache_remove_entry(ch, current_detail);
4631da177e4SLinus Torvalds 			rv = 1;
4643af4974eSNeilBrown 			break;
4651da177e4SLinus Torvalds 		}
4663af4974eSNeilBrown 
4671863d77fSTrond Myklebust 		spin_unlock(&current_detail->hash_lock);
4681da177e4SLinus Torvalds 		d = current_detail;
4691da177e4SLinus Torvalds 		if (!ch)
4701da177e4SLinus Torvalds 			current_index++;
4711da177e4SLinus Torvalds 		spin_unlock(&cache_list_lock);
472809fe3c5STrond Myklebust 		if (ch)
473809fe3c5STrond Myklebust 			sunrpc_end_cache_remove_entry(ch, d);
4741da177e4SLinus Torvalds 	} else
4751da177e4SLinus Torvalds 		spin_unlock(&cache_list_lock);
4761da177e4SLinus Torvalds 
4771da177e4SLinus Torvalds 	return rv;
4781da177e4SLinus Torvalds }
4791da177e4SLinus Torvalds 
4801da177e4SLinus Torvalds /*
4811da177e4SLinus Torvalds  * We want to regularly clean the cache, so we need to schedule some work ...
4821da177e4SLinus Torvalds  */
48365f27f38SDavid Howells static void do_cache_clean(struct work_struct *work)
4841da177e4SLinus Torvalds {
4851da177e4SLinus Torvalds 	int delay = 5;
4861da177e4SLinus Torvalds 	if (cache_clean() == -1)
4876aad89c8SAnton Blanchard 		delay = round_jiffies_relative(30*HZ);
4881da177e4SLinus Torvalds 
4891da177e4SLinus Torvalds 	if (list_empty(&cache_list))
4901da177e4SLinus Torvalds 		delay = 0;
4911da177e4SLinus Torvalds 
4921da177e4SLinus Torvalds 	if (delay)
49377b00bc0SKe Wang 		queue_delayed_work(system_power_efficient_wq,
49477b00bc0SKe Wang 				   &cache_cleaner, delay);
4951da177e4SLinus Torvalds }
4961da177e4SLinus Torvalds 
4971da177e4SLinus Torvalds 
4981da177e4SLinus Torvalds /*
4991da177e4SLinus Torvalds  * Clean all caches promptly.  This just calls cache_clean
5001da177e4SLinus Torvalds  * repeatedly until we are sure that every cache has had a chance to
5011da177e4SLinus Torvalds  * be fully cleaned
5021da177e4SLinus Torvalds  */
5031da177e4SLinus Torvalds void cache_flush(void)
5041da177e4SLinus Torvalds {
5051da177e4SLinus Torvalds 	while (cache_clean() != -1)
5061da177e4SLinus Torvalds 		cond_resched();
5071da177e4SLinus Torvalds 	while (cache_clean() != -1)
5081da177e4SLinus Torvalds 		cond_resched();
5091da177e4SLinus Torvalds }
51024c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(cache_flush);
5111da177e4SLinus Torvalds 
5121da177e4SLinus Torvalds void cache_purge(struct cache_detail *detail)
5131da177e4SLinus Torvalds {
514471a930aSKinglong Mee 	struct cache_head *ch = NULL;
515471a930aSKinglong Mee 	struct hlist_head *head = NULL;
516471a930aSKinglong Mee 	struct hlist_node *tmp = NULL;
517471a930aSKinglong Mee 	int i = 0;
518471a930aSKinglong Mee 
5191863d77fSTrond Myklebust 	spin_lock(&detail->hash_lock);
520471a930aSKinglong Mee 	if (!detail->entries) {
5211863d77fSTrond Myklebust 		spin_unlock(&detail->hash_lock);
522471a930aSKinglong Mee 		return;
523471a930aSKinglong Mee 	}
524471a930aSKinglong Mee 
525471a930aSKinglong Mee 	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
526471a930aSKinglong Mee 	for (i = 0; i < detail->hash_size; i++) {
527471a930aSKinglong Mee 		head = &detail->hash_table[i];
528471a930aSKinglong Mee 		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
529809fe3c5STrond Myklebust 			sunrpc_begin_cache_remove_entry(ch, detail);
5301863d77fSTrond Myklebust 			spin_unlock(&detail->hash_lock);
531809fe3c5STrond Myklebust 			sunrpc_end_cache_remove_entry(ch, detail);
5321863d77fSTrond Myklebust 			spin_lock(&detail->hash_lock);
533471a930aSKinglong Mee 		}
534471a930aSKinglong Mee 	}
5351863d77fSTrond Myklebust 	spin_unlock(&detail->hash_lock);
5361da177e4SLinus Torvalds }
53724c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(cache_purge);
5381da177e4SLinus Torvalds 
5391da177e4SLinus Torvalds 
5401da177e4SLinus Torvalds /*
5411da177e4SLinus Torvalds  * Deferral and Revisiting of Requests.
5421da177e4SLinus Torvalds  *
5431da177e4SLinus Torvalds  * If a cache lookup finds a pending entry, we
5441da177e4SLinus Torvalds  * need to defer the request and revisit it later.
5451da177e4SLinus Torvalds  * All deferred requests are stored in a hash table,
5461da177e4SLinus Torvalds  * indexed by "struct cache_head *".
5471da177e4SLinus Torvalds  * As it may be wasteful to store a whole request
5481da177e4SLinus Torvalds  * structure, we allow the request to provide a
5491da177e4SLinus Torvalds  * deferred form, which must contain a
5501da177e4SLinus Torvalds  * 'struct cache_deferred_req'
5511da177e4SLinus Torvalds  * This cache_deferred_req contains a method to allow
5521da177e4SLinus Torvalds  * it to be revisited when cache info is available
5531da177e4SLinus Torvalds  */
5541da177e4SLinus Torvalds 
5551da177e4SLinus Torvalds #define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
5561da177e4SLinus Torvalds #define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
5571da177e4SLinus Torvalds 
5581da177e4SLinus Torvalds #define	DFR_MAX	300	/* ??? */
5591da177e4SLinus Torvalds 
5601da177e4SLinus Torvalds static DEFINE_SPINLOCK(cache_defer_lock);
5611da177e4SLinus Torvalds static LIST_HEAD(cache_defer_list);
56211174492SNeilBrown static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
5631da177e4SLinus Torvalds static int cache_defer_cnt;
5641da177e4SLinus Torvalds 
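/* The two helpers below must be called with cache_defer_lock held. */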
5656610f720SJ. Bruce Fields static void __unhash_deferred_req(struct cache_deferred_req *dreq)
5661da177e4SLinus Torvalds {
56711174492SNeilBrown 	hlist_del_init(&dreq->hash);
568e33534d5SNeilBrown 	if (!list_empty(&dreq->recent)) {
569e33534d5SNeilBrown 		list_del_init(&dreq->recent);
5706610f720SJ. Bruce Fields 		cache_defer_cnt--;
5716610f720SJ. Bruce Fields 	}
572e33534d5SNeilBrown }
5736610f720SJ. Bruce Fields 
5746610f720SJ. Bruce Fields static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
5756610f720SJ. Bruce Fields {
5761da177e4SLinus Torvalds 	int hash = DFR_HASH(item);
5771da177e4SLinus Torvalds 
578e33534d5SNeilBrown 	INIT_LIST_HEAD(&dreq->recent);
57911174492SNeilBrown 	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
58001f3bd1fSJ.Bruce Fields }
5816610f720SJ. Bruce Fields 
582e33534d5SNeilBrown static void setup_deferral(struct cache_deferred_req *dreq,
583e33534d5SNeilBrown 			   struct cache_head *item,
584e33534d5SNeilBrown 			   int count_me)
5851da177e4SLinus Torvalds {
5861da177e4SLinus Torvalds 
5871da177e4SLinus Torvalds 	dreq->item = item;
5881da177e4SLinus Torvalds 
5891da177e4SLinus Torvalds 	spin_lock(&cache_defer_lock);
5901da177e4SLinus Torvalds 
5916610f720SJ. Bruce Fields 	__hash_deferred_req(dreq, item);
5921da177e4SLinus Torvalds 
593e33534d5SNeilBrown 	if (count_me) {
594e33534d5SNeilBrown 		cache_defer_cnt++;
5951da177e4SLinus Torvalds 		list_add(&dreq->recent, &cache_defer_list);
5961da177e4SLinus Torvalds 	}
597e33534d5SNeilBrown 
5981da177e4SLinus Torvalds 	spin_unlock(&cache_defer_lock);
5991da177e4SLinus Torvalds 
6001da177e4SLinus Torvalds }
601f16b6e8dSNeilBrown 
6023211af11SJ. Bruce Fields struct thread_deferred_req {
6033211af11SJ. Bruce Fields 	struct cache_deferred_req handle;
6043211af11SJ. Bruce Fields 	struct completion completion;
6053211af11SJ. Bruce Fields };
6063211af11SJ. Bruce Fields 
6073211af11SJ. Bruce Fields static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
6083211af11SJ. Bruce Fields {
6093211af11SJ. Bruce Fields 	struct thread_deferred_req *dr =
6103211af11SJ. Bruce Fields 		container_of(dreq, struct thread_deferred_req, handle);
6113211af11SJ. Bruce Fields 	complete(&dr->completion);
6123211af11SJ. Bruce Fields }
6133211af11SJ. Bruce Fields 
614d29068c4SNeilBrown static void cache_wait_req(struct cache_req *req, struct cache_head *item)
6153211af11SJ. Bruce Fields {
6163211af11SJ. Bruce Fields 	struct thread_deferred_req sleeper;
6173211af11SJ. Bruce Fields 	struct cache_deferred_req *dreq = &sleeper.handle;
6183211af11SJ. Bruce Fields 
6193211af11SJ. Bruce Fields 	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
6203211af11SJ. Bruce Fields 	dreq->revisit = cache_restart_thread;
6213211af11SJ. Bruce Fields 
622e33534d5SNeilBrown 	setup_deferral(dreq, item, 0);
6233211af11SJ. Bruce Fields 
624d29068c4SNeilBrown 	if (!test_bit(CACHE_PENDING, &item->flags) ||
625277f68dbSNeilBrown 	    wait_for_completion_interruptible_timeout(
626f16b6e8dSNeilBrown 		    &sleeper.completion, req->thread_wait) <= 0) {
627f16b6e8dSNeilBrown 		/* The completion wasn't completed, so we need
628f16b6e8dSNeilBrown 		 * to clean up
629f16b6e8dSNeilBrown 		 */
630f16b6e8dSNeilBrown 		spin_lock(&cache_defer_lock);
63111174492SNeilBrown 		if (!hlist_unhashed(&sleeper.handle.hash)) {
6326610f720SJ. Bruce Fields 			__unhash_deferred_req(&sleeper.handle);
633f16b6e8dSNeilBrown 			spin_unlock(&cache_defer_lock);
634f16b6e8dSNeilBrown 		} else {
635f16b6e8dSNeilBrown 			/* cache_revisit_request already removed
636f16b6e8dSNeilBrown 			 * this from the hash table, but hasn't
637f16b6e8dSNeilBrown 			 * called ->revisit yet.  It will very soon
638f16b6e8dSNeilBrown 			 * and we need to wait for it.
639f16b6e8dSNeilBrown 			 */
640f16b6e8dSNeilBrown 			spin_unlock(&cache_defer_lock);
641f16b6e8dSNeilBrown 			wait_for_completion(&sleeper.completion);
642f16b6e8dSNeilBrown 		}
643f16b6e8dSNeilBrown 	}
644f16b6e8dSNeilBrown }
6453211af11SJ. Bruce Fields 
646e33534d5SNeilBrown static void cache_limit_defers(void)
647e33534d5SNeilBrown {
648e33534d5SNeilBrown 	/* Make sure we haven't exceeded the limit of allowed deferred
649e33534d5SNeilBrown 	 * requests.
650e33534d5SNeilBrown 	 */
651e33534d5SNeilBrown 	struct cache_deferred_req *discard = NULL;
652e33534d5SNeilBrown 
653e33534d5SNeilBrown 	if (cache_defer_cnt <= DFR_MAX)
654e33534d5SNeilBrown 		return;
655e33534d5SNeilBrown 
656e33534d5SNeilBrown 	spin_lock(&cache_defer_lock);
657e33534d5SNeilBrown 
658e33534d5SNeilBrown 	/* Consider removing either the first or the last */
659e33534d5SNeilBrown 	if (cache_defer_cnt > DFR_MAX) {
66063862b5bSAruna-Hewapathirane 		if (prandom_u32() & 1)
661e33534d5SNeilBrown 			discard = list_entry(cache_defer_list.next,
662e33534d5SNeilBrown 					     struct cache_deferred_req, recent);
663e33534d5SNeilBrown 		else
664e33534d5SNeilBrown 			discard = list_entry(cache_defer_list.prev,
665e33534d5SNeilBrown 					     struct cache_deferred_req, recent);
666e33534d5SNeilBrown 		__unhash_deferred_req(discard);
667e33534d5SNeilBrown 	}
668e33534d5SNeilBrown 	spin_unlock(&cache_defer_lock);
669e33534d5SNeilBrown 	if (discard)
670e33534d5SNeilBrown 		discard->revisit(discard, 1);
671e33534d5SNeilBrown }
672e33534d5SNeilBrown 
673d76d1815SJ. Bruce Fields /* Return true if and only if a deferred request is queued. */
674d76d1815SJ. Bruce Fields static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
6753211af11SJ. Bruce Fields {
6763211af11SJ. Bruce Fields 	struct cache_deferred_req *dreq;
6773211af11SJ. Bruce Fields 
6783211af11SJ. Bruce Fields 	if (req->thread_wait) {
679d29068c4SNeilBrown 		cache_wait_req(req, item);
680d29068c4SNeilBrown 		if (!test_bit(CACHE_PENDING, &item->flags))
681d76d1815SJ. Bruce Fields 			return false;
6823211af11SJ. Bruce Fields 	}
6833211af11SJ. Bruce Fields 	dreq = req->defer(req);
6843211af11SJ. Bruce Fields 	if (dreq == NULL)
685d76d1815SJ. Bruce Fields 		return false;
686e33534d5SNeilBrown 	setup_deferral(dreq, item, 1);
687d29068c4SNeilBrown 	if (!test_bit(CACHE_PENDING, &item->flags))
688d29068c4SNeilBrown 		/* Bit could have been cleared before we managed to
689d29068c4SNeilBrown 		 * set up the deferral, so need to revisit just in case
690d29068c4SNeilBrown 		 */
691d29068c4SNeilBrown 		cache_revisit_request(item);
692e33534d5SNeilBrown 
693e33534d5SNeilBrown 	cache_limit_defers();
694d76d1815SJ. Bruce Fields 	return true;
695989a19b9SNeilBrown }
6961da177e4SLinus Torvalds 
6971da177e4SLinus Torvalds static void cache_revisit_request(struct cache_head *item)
6981da177e4SLinus Torvalds {
6991da177e4SLinus Torvalds 	struct cache_deferred_req *dreq;
7001da177e4SLinus Torvalds 	struct list_head pending;
701b67bfe0dSSasha Levin 	struct hlist_node *tmp;
7021da177e4SLinus Torvalds 	int hash = DFR_HASH(item);
7031da177e4SLinus Torvalds 
7041da177e4SLinus Torvalds 	INIT_LIST_HEAD(&pending);
7051da177e4SLinus Torvalds 	spin_lock(&cache_defer_lock);
7061da177e4SLinus Torvalds 
707b67bfe0dSSasha Levin 	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
7081da177e4SLinus Torvalds 		if (dreq->item == item) {
7096610f720SJ. Bruce Fields 			__unhash_deferred_req(dreq);
7106610f720SJ. Bruce Fields 			list_add(&dreq->recent, &pending);
7111da177e4SLinus Torvalds 		}
71211174492SNeilBrown 
7131da177e4SLinus Torvalds 	spin_unlock(&cache_defer_lock);
7141da177e4SLinus Torvalds 
7151da177e4SLinus Torvalds 	while (!list_empty(&pending)) {
7161da177e4SLinus Torvalds 		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
7171da177e4SLinus Torvalds 		list_del_init(&dreq->recent);
7181da177e4SLinus Torvalds 		dreq->revisit(dreq, 0);
7191da177e4SLinus Torvalds 	}
7201da177e4SLinus Torvalds }
7211da177e4SLinus Torvalds 
7221da177e4SLinus Torvalds void cache_clean_deferred(void *owner)
7231da177e4SLinus Torvalds {
7241da177e4SLinus Torvalds 	struct cache_deferred_req *dreq, *tmp;
7251da177e4SLinus Torvalds 	struct list_head pending;
7261da177e4SLinus Torvalds 
7271da177e4SLinus Torvalds 
7281da177e4SLinus Torvalds 	INIT_LIST_HEAD(&pending);
7291da177e4SLinus Torvalds 	spin_lock(&cache_defer_lock);
7301da177e4SLinus Torvalds 
7311da177e4SLinus Torvalds 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
7321da177e4SLinus Torvalds 		if (dreq->owner == owner) {
7336610f720SJ. Bruce Fields 			__unhash_deferred_req(dreq);
734e95dffa4SNeilBrown 			list_add(&dreq->recent, &pending);
7351da177e4SLinus Torvalds 		}
7361da177e4SLinus Torvalds 	}
7371da177e4SLinus Torvalds 	spin_unlock(&cache_defer_lock);
7381da177e4SLinus Torvalds 
7391da177e4SLinus Torvalds 	while (!list_empty(&pending)) {
7401da177e4SLinus Torvalds 		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
7411da177e4SLinus Torvalds 		list_del_init(&dreq->recent);
7421da177e4SLinus Torvalds 		dreq->revisit(dreq, 1);
7431da177e4SLinus Torvalds 	}
7441da177e4SLinus Torvalds }
7451da177e4SLinus Torvalds 
7461da177e4SLinus Torvalds /*
7471da177e4SLinus Torvalds  * communicate with user-space
7481da177e4SLinus Torvalds  *
7496489a8f4SKinglong Mee  * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
750a490c681SJ. Bruce Fields  * On read, you get a full request, or block.
751a490c681SJ. Bruce Fields  * On write, an update request is processed.
752a490c681SJ. Bruce Fields  * Poll works if anything to read, and always allows write.
7531da177e4SLinus Torvalds  *
7541da177e4SLinus Torvalds  * Implemented by a linked list of requests.  Each open file has
755a490c681SJ. Bruce Fields  * a ->private that also exists in this list.  New requests are added
7561da177e4SLinus Torvalds  * to the end and may wake up any preceding readers.
7571da177e4SLinus Torvalds  * New readers are added to the head.  If, on read, an item is found with
7581da177e4SLinus Torvalds  * CACHE_UPCALLING clear, we free it from the list.
7591da177e4SLinus Torvalds  *
7601da177e4SLinus Torvalds  */
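
/*
 * In practice the peer on these channels is typically a userspace daemon
 * (e.g. rpc.mountd or rpc.svcgssd) that reads one request line at a time
 * and writes back an update line in the same text format.
 */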
7611da177e4SLinus Torvalds 
7621da177e4SLinus Torvalds static DEFINE_SPINLOCK(queue_lock);
7634a3e2f71SArjan van de Ven static DEFINE_MUTEX(queue_io_mutex);
7641da177e4SLinus Torvalds 
7651da177e4SLinus Torvalds struct cache_queue {
7661da177e4SLinus Torvalds 	struct list_head	list;
7671da177e4SLinus Torvalds 	int			reader;	/* if 0, then request */
7681da177e4SLinus Torvalds };
7691da177e4SLinus Torvalds struct cache_request {
7701da177e4SLinus Torvalds 	struct cache_queue	q;
7711da177e4SLinus Torvalds 	struct cache_head	*item;
7721da177e4SLinus Torvalds 	char			* buf;
7731da177e4SLinus Torvalds 	int			len;
7741da177e4SLinus Torvalds 	int			readers;
7751da177e4SLinus Torvalds };
7761da177e4SLinus Torvalds struct cache_reader {
7771da177e4SLinus Torvalds 	struct cache_queue	q;
7781da177e4SLinus Torvalds 	int			offset;	/* if non-0, we have a refcnt on next request */
7791da177e4SLinus Torvalds };
7801da177e4SLinus Torvalds 
781d94af6deSStanislav Kinsbursky static int cache_request(struct cache_detail *detail,
782d94af6deSStanislav Kinsbursky 			       struct cache_request *crq)
783d94af6deSStanislav Kinsbursky {
784d94af6deSStanislav Kinsbursky 	char *bp = crq->buf;
785d94af6deSStanislav Kinsbursky 	int len = PAGE_SIZE;
786d94af6deSStanislav Kinsbursky 
787d94af6deSStanislav Kinsbursky 	detail->cache_request(detail, crq->item, &bp, &len);
788d94af6deSStanislav Kinsbursky 	if (len < 0)
789d94af6deSStanislav Kinsbursky 		return -EAGAIN;
790d94af6deSStanislav Kinsbursky 	return PAGE_SIZE - len;
791d94af6deSStanislav Kinsbursky }
792d94af6deSStanislav Kinsbursky 
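/*
 * Hand the next pending upcall request to a reader.  The reader's queue
 * entry is advanced past other readers to the next request; the request is
 * formatted on first use (rq->len == 0), may be consumed across several
 * read() calls (tracked by rp->offset), and is freed once no reader is
 * using it and it is no longer marked CACHE_PENDING.
 */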
793173912a6STrond Myklebust static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
794173912a6STrond Myklebust 			  loff_t *ppos, struct cache_detail *cd)
7951da177e4SLinus Torvalds {
7961da177e4SLinus Torvalds 	struct cache_reader *rp = filp->private_data;
7971da177e4SLinus Torvalds 	struct cache_request *rq;
798496ad9aaSAl Viro 	struct inode *inode = file_inode(filp);
7991da177e4SLinus Torvalds 	int err;
8001da177e4SLinus Torvalds 
8011da177e4SLinus Torvalds 	if (count == 0)
8021da177e4SLinus Torvalds 		return 0;
8031da177e4SLinus Torvalds 
8045955102cSAl Viro 	inode_lock(inode); /* protect against multiple concurrent
8051da177e4SLinus Torvalds 			      * readers on this file */
8061da177e4SLinus Torvalds  again:
8071da177e4SLinus Torvalds 	spin_lock(&queue_lock);
8081da177e4SLinus Torvalds 	/* need to find next request */
8091da177e4SLinus Torvalds 	while (rp->q.list.next != &cd->queue &&
8101da177e4SLinus Torvalds 	       list_entry(rp->q.list.next, struct cache_queue, list)
8111da177e4SLinus Torvalds 	       ->reader) {
8121da177e4SLinus Torvalds 		struct list_head *next = rp->q.list.next;
8131da177e4SLinus Torvalds 		list_move(&rp->q.list, next);
8141da177e4SLinus Torvalds 	}
8151da177e4SLinus Torvalds 	if (rp->q.list.next == &cd->queue) {
8161da177e4SLinus Torvalds 		spin_unlock(&queue_lock);
8175955102cSAl Viro 		inode_unlock(inode);
8180db74d9aSWeston Andros Adamson 		WARN_ON_ONCE(rp->offset);
8191da177e4SLinus Torvalds 		return 0;
8201da177e4SLinus Torvalds 	}
8211da177e4SLinus Torvalds 	rq = container_of(rp->q.list.next, struct cache_request, q.list);
8220db74d9aSWeston Andros Adamson 	WARN_ON_ONCE(rq->q.reader);
8231da177e4SLinus Torvalds 	if (rp->offset == 0)
8241da177e4SLinus Torvalds 		rq->readers++;
8251da177e4SLinus Torvalds 	spin_unlock(&queue_lock);
8261da177e4SLinus Torvalds 
827d94af6deSStanislav Kinsbursky 	if (rq->len == 0) {
828d94af6deSStanislav Kinsbursky 		err = cache_request(cd, rq);
829d94af6deSStanislav Kinsbursky 		if (err < 0)
830d94af6deSStanislav Kinsbursky 			goto out;
831d94af6deSStanislav Kinsbursky 		rq->len = err;
832d94af6deSStanislav Kinsbursky 	}
833d94af6deSStanislav Kinsbursky 
8341da177e4SLinus Torvalds 	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
8351da177e4SLinus Torvalds 		err = -EAGAIN;
8361da177e4SLinus Torvalds 		spin_lock(&queue_lock);
8371da177e4SLinus Torvalds 		list_move(&rp->q.list, &rq->q.list);
8381da177e4SLinus Torvalds 		spin_unlock(&queue_lock);
8391da177e4SLinus Torvalds 	} else {
8401da177e4SLinus Torvalds 		if (rp->offset + count > rq->len)
8411da177e4SLinus Torvalds 			count = rq->len - rp->offset;
8421da177e4SLinus Torvalds 		err = -EFAULT;
8431da177e4SLinus Torvalds 		if (copy_to_user(buf, rq->buf + rp->offset, count))
8441da177e4SLinus Torvalds 			goto out;
8451da177e4SLinus Torvalds 		rp->offset += count;
8461da177e4SLinus Torvalds 		if (rp->offset >= rq->len) {
8471da177e4SLinus Torvalds 			rp->offset = 0;
8481da177e4SLinus Torvalds 			spin_lock(&queue_lock);
8491da177e4SLinus Torvalds 			list_move(&rp->q.list, &rq->q.list);
8501da177e4SLinus Torvalds 			spin_unlock(&queue_lock);
8511da177e4SLinus Torvalds 		}
8521da177e4SLinus Torvalds 		err = 0;
8531da177e4SLinus Torvalds 	}
8541da177e4SLinus Torvalds  out:
8551da177e4SLinus Torvalds 	if (rp->offset == 0) {
8561da177e4SLinus Torvalds 		/* need to release rq */
8571da177e4SLinus Torvalds 		spin_lock(&queue_lock);
8581da177e4SLinus Torvalds 		rq->readers--;
8591da177e4SLinus Torvalds 		if (rq->readers == 0 &&
8601da177e4SLinus Torvalds 		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
8611da177e4SLinus Torvalds 			list_del(&rq->q.list);
8621da177e4SLinus Torvalds 			spin_unlock(&queue_lock);
863baab935fSNeilBrown 			cache_put(rq->item, cd);
8641da177e4SLinus Torvalds 			kfree(rq->buf);
8651da177e4SLinus Torvalds 			kfree(rq);
8661da177e4SLinus Torvalds 		} else
8671da177e4SLinus Torvalds 			spin_unlock(&queue_lock);
8681da177e4SLinus Torvalds 	}
8691da177e4SLinus Torvalds 	if (err == -EAGAIN)
8701da177e4SLinus Torvalds 		goto again;
8715955102cSAl Viro 	inode_unlock(inode);
8721da177e4SLinus Torvalds 	return err ? err :  count;
8731da177e4SLinus Torvalds }
8741da177e4SLinus Torvalds 
875da77005fSTrond Myklebust static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
876da77005fSTrond Myklebust 				 size_t count, struct cache_detail *cd)
8771da177e4SLinus Torvalds {
878da77005fSTrond Myklebust 	ssize_t ret;
8791da177e4SLinus Torvalds 
8806d8d1749SDan Carpenter 	if (count == 0)
8816d8d1749SDan Carpenter 		return -EINVAL;
882da77005fSTrond Myklebust 	if (copy_from_user(kaddr, buf, count))
8831da177e4SLinus Torvalds 		return -EFAULT;
884da77005fSTrond Myklebust 	kaddr[count] = '\0';
885da77005fSTrond Myklebust 	ret = cd->cache_parse(cd, kaddr, count);
886da77005fSTrond Myklebust 	if (!ret)
887da77005fSTrond Myklebust 		ret = count;
888da77005fSTrond Myklebust 	return ret;
8891da177e4SLinus Torvalds }
8901da177e4SLinus Torvalds 
891da77005fSTrond Myklebust static ssize_t cache_slow_downcall(const char __user *buf,
892da77005fSTrond Myklebust 				   size_t count, struct cache_detail *cd)
893da77005fSTrond Myklebust {
8941da177e4SLinus Torvalds 	static char write_buf[8192]; /* protected by queue_io_mutex */
895da77005fSTrond Myklebust 	ssize_t ret = -EINVAL;
896da77005fSTrond Myklebust 
897da77005fSTrond Myklebust 	if (count >= sizeof(write_buf))
898da77005fSTrond Myklebust 		goto out;
899da77005fSTrond Myklebust 	mutex_lock(&queue_io_mutex);
900da77005fSTrond Myklebust 	ret = cache_do_downcall(write_buf, buf, count, cd);
9014a3e2f71SArjan van de Ven 	mutex_unlock(&queue_io_mutex);
902da77005fSTrond Myklebust out:
903da77005fSTrond Myklebust 	return ret;
904da77005fSTrond Myklebust }
905da77005fSTrond Myklebust 
906da77005fSTrond Myklebust static ssize_t cache_downcall(struct address_space *mapping,
907da77005fSTrond Myklebust 			      const char __user *buf,
908da77005fSTrond Myklebust 			      size_t count, struct cache_detail *cd)
909da77005fSTrond Myklebust {
910da77005fSTrond Myklebust 	struct page *page;
911da77005fSTrond Myklebust 	char *kaddr;
912da77005fSTrond Myklebust 	ssize_t ret = -ENOMEM;
913da77005fSTrond Myklebust 
91409cbfeafSKirill A. Shutemov 	if (count >= PAGE_SIZE)
915da77005fSTrond Myklebust 		goto out_slow;
916da77005fSTrond Myklebust 
917da77005fSTrond Myklebust 	page = find_or_create_page(mapping, 0, GFP_KERNEL);
918da77005fSTrond Myklebust 	if (!page)
919da77005fSTrond Myklebust 		goto out_slow;
920da77005fSTrond Myklebust 
921da77005fSTrond Myklebust 	kaddr = kmap(page);
922da77005fSTrond Myklebust 	ret = cache_do_downcall(kaddr, buf, count, cd);
923da77005fSTrond Myklebust 	kunmap(page);
924da77005fSTrond Myklebust 	unlock_page(page);
92509cbfeafSKirill A. Shutemov 	put_page(page);
926da77005fSTrond Myklebust 	return ret;
927da77005fSTrond Myklebust out_slow:
928da77005fSTrond Myklebust 	return cache_slow_downcall(buf, count, cd);
929da77005fSTrond Myklebust }
9301da177e4SLinus Torvalds 
931173912a6STrond Myklebust static ssize_t cache_write(struct file *filp, const char __user *buf,
932173912a6STrond Myklebust 			   size_t count, loff_t *ppos,
933173912a6STrond Myklebust 			   struct cache_detail *cd)
9341da177e4SLinus Torvalds {
935da77005fSTrond Myklebust 	struct address_space *mapping = filp->f_mapping;
936496ad9aaSAl Viro 	struct inode *inode = file_inode(filp);
937da77005fSTrond Myklebust 	ssize_t ret = -EINVAL;
9381da177e4SLinus Torvalds 
939da77005fSTrond Myklebust 	if (!cd->cache_parse)
940da77005fSTrond Myklebust 		goto out;
9411da177e4SLinus Torvalds 
9425955102cSAl Viro 	inode_lock(inode);
943da77005fSTrond Myklebust 	ret = cache_downcall(mapping, buf, count, cd);
9445955102cSAl Viro 	inode_unlock(inode);
945da77005fSTrond Myklebust out:
946da77005fSTrond Myklebust 	return ret;
9471da177e4SLinus Torvalds }
9481da177e4SLinus Torvalds 
9491da177e4SLinus Torvalds static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
9501da177e4SLinus Torvalds 
951ade994f4SAl Viro static __poll_t cache_poll(struct file *filp, poll_table *wait,
952173912a6STrond Myklebust 			       struct cache_detail *cd)
9531da177e4SLinus Torvalds {
954ade994f4SAl Viro 	__poll_t mask;
9551da177e4SLinus Torvalds 	struct cache_reader *rp = filp->private_data;
9561da177e4SLinus Torvalds 	struct cache_queue *cq;
9571da177e4SLinus Torvalds 
9581da177e4SLinus Torvalds 	poll_wait(filp, &queue_wait, wait);
9591da177e4SLinus Torvalds 
9601da177e4SLinus Torvalds 	/* always allow write */
961a9a08845SLinus Torvalds 	mask = EPOLLOUT | EPOLLWRNORM;
9621da177e4SLinus Torvalds 
9631da177e4SLinus Torvalds 	if (!rp)
9641da177e4SLinus Torvalds 		return mask;
9651da177e4SLinus Torvalds 
9661da177e4SLinus Torvalds 	spin_lock(&queue_lock);
9671da177e4SLinus Torvalds 
9681da177e4SLinus Torvalds 	for (cq = &rp->q; &cq->list != &cd->queue;
9691da177e4SLinus Torvalds 	     cq = list_entry(cq->list.next, struct cache_queue, list))
9701da177e4SLinus Torvalds 		if (!cq->reader) {
971a9a08845SLinus Torvalds 			mask |= EPOLLIN | EPOLLRDNORM;
9721da177e4SLinus Torvalds 			break;
9731da177e4SLinus Torvalds 		}
9741da177e4SLinus Torvalds 	spin_unlock(&queue_lock);
9751da177e4SLinus Torvalds 	return mask;
9761da177e4SLinus Torvalds }
9771da177e4SLinus Torvalds 
978173912a6STrond Myklebust static int cache_ioctl(struct inode *ino, struct file *filp,
979173912a6STrond Myklebust 		       unsigned int cmd, unsigned long arg,
980173912a6STrond Myklebust 		       struct cache_detail *cd)
9811da177e4SLinus Torvalds {
9821da177e4SLinus Torvalds 	int len = 0;
9831da177e4SLinus Torvalds 	struct cache_reader *rp = filp->private_data;
9841da177e4SLinus Torvalds 	struct cache_queue *cq;
9851da177e4SLinus Torvalds 
9861da177e4SLinus Torvalds 	if (cmd != FIONREAD || !rp)
9871da177e4SLinus Torvalds 		return -EINVAL;
9881da177e4SLinus Torvalds 
9891da177e4SLinus Torvalds 	spin_lock(&queue_lock);
9901da177e4SLinus Torvalds 
9911da177e4SLinus Torvalds 	/* only find the length remaining in current request,
9921da177e4SLinus Torvalds 	 * or the length of the next request
9931da177e4SLinus Torvalds 	 */
9941da177e4SLinus Torvalds 	for (cq = &rp->q; &cq->list != &cd->queue;
9951da177e4SLinus Torvalds 	     cq = list_entry(cq->list.next, struct cache_queue, list))
9961da177e4SLinus Torvalds 		if (!cq->reader) {
9971da177e4SLinus Torvalds 			struct cache_request *cr =
9981da177e4SLinus Torvalds 				container_of(cq, struct cache_request, q);
9991da177e4SLinus Torvalds 			len = cr->len - rp->offset;
10001da177e4SLinus Torvalds 			break;
10011da177e4SLinus Torvalds 		}
10021da177e4SLinus Torvalds 	spin_unlock(&queue_lock);
10031da177e4SLinus Torvalds 
10041da177e4SLinus Torvalds 	return put_user(len, (int __user *)arg);
10051da177e4SLinus Torvalds }
10061da177e4SLinus Torvalds 
1007173912a6STrond Myklebust static int cache_open(struct inode *inode, struct file *filp,
1008173912a6STrond Myklebust 		      struct cache_detail *cd)
10091da177e4SLinus Torvalds {
10101da177e4SLinus Torvalds 	struct cache_reader *rp = NULL;
10111da177e4SLinus Torvalds 
1012f7e86ab9STrond Myklebust 	if (!cd || !try_module_get(cd->owner))
1013f7e86ab9STrond Myklebust 		return -EACCES;
10141da177e4SLinus Torvalds 	nonseekable_open(inode, filp);
10151da177e4SLinus Torvalds 	if (filp->f_mode & FMODE_READ) {
10161da177e4SLinus Torvalds 		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1017a7823c79SAlexey Khoroshilov 		if (!rp) {
1018a7823c79SAlexey Khoroshilov 			module_put(cd->owner);
10191da177e4SLinus Torvalds 			return -ENOMEM;
1020a7823c79SAlexey Khoroshilov 		}
10211da177e4SLinus Torvalds 		rp->offset = 0;
10221da177e4SLinus Torvalds 		rp->q.reader = 1;
102364a38e84SDave Wysochanski 
10241da177e4SLinus Torvalds 		spin_lock(&queue_lock);
10251da177e4SLinus Torvalds 		list_add(&rp->q.list, &cd->queue);
10261da177e4SLinus Torvalds 		spin_unlock(&queue_lock);
10271da177e4SLinus Torvalds 	}
102864a38e84SDave Wysochanski 	if (filp->f_mode & FMODE_WRITE)
102964a38e84SDave Wysochanski 		atomic_inc(&cd->writers);
10301da177e4SLinus Torvalds 	filp->private_data = rp;
10311da177e4SLinus Torvalds 	return 0;
10321da177e4SLinus Torvalds }
10331da177e4SLinus Torvalds 
1034173912a6STrond Myklebust static int cache_release(struct inode *inode, struct file *filp,
1035173912a6STrond Myklebust 			 struct cache_detail *cd)
10361da177e4SLinus Torvalds {
10371da177e4SLinus Torvalds 	struct cache_reader *rp = filp->private_data;
10381da177e4SLinus Torvalds 
10391da177e4SLinus Torvalds 	if (rp) {
10401da177e4SLinus Torvalds 		spin_lock(&queue_lock);
10411da177e4SLinus Torvalds 		if (rp->offset) {
10421da177e4SLinus Torvalds 			struct cache_queue *cq;
10431da177e4SLinus Torvalds 			for (cq = &rp->q; &cq->list != &cd->queue;
10441da177e4SLinus Torvalds 			     cq = list_entry(cq->list.next, struct cache_queue, list))
10451da177e4SLinus Torvalds 				if (!cq->reader) {
10461da177e4SLinus Torvalds 					container_of(cq, struct cache_request, q)
10471da177e4SLinus Torvalds 						->readers--;
10481da177e4SLinus Torvalds 					break;
10491da177e4SLinus Torvalds 				}
10501da177e4SLinus Torvalds 			rp->offset = 0;
10511da177e4SLinus Torvalds 		}
10521da177e4SLinus Torvalds 		list_del(&rp->q.list);
10531da177e4SLinus Torvalds 		spin_unlock(&queue_lock);
10541da177e4SLinus Torvalds 
10551da177e4SLinus Torvalds 		filp->private_data = NULL;
10561da177e4SLinus Torvalds 		kfree(rp);
10571da177e4SLinus Torvalds 
105864a38e84SDave Wysochanski 	}
105964a38e84SDave Wysochanski 	if (filp->f_mode & FMODE_WRITE) {
106064a38e84SDave Wysochanski 		atomic_dec(&cd->writers);
1061c5b29f88SNeilBrown 		cd->last_close = seconds_since_boot();
10621da177e4SLinus Torvalds 	}
1063f7e86ab9STrond Myklebust 	module_put(cd->owner);
10641da177e4SLinus Torvalds 	return 0;
10651da177e4SLinus Torvalds }
10661da177e4SLinus Torvalds 
10671da177e4SLinus Torvalds 
10681da177e4SLinus Torvalds 
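/*
 * Remove any queued (but not currently being read) upcall requests for 'ch'
 * and drop the references they hold.  If the entry has raced back to
 * CACHE_PENDING, the queue is left alone.
 */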
1069f866a819SNeilBrown static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
10701da177e4SLinus Torvalds {
1071f9e1aedcSNeilBrown 	struct cache_queue *cq, *tmp;
1072f9e1aedcSNeilBrown 	struct cache_request *cr;
1073f9e1aedcSNeilBrown 	struct list_head dequeued;
1074f9e1aedcSNeilBrown 
1075f9e1aedcSNeilBrown 	INIT_LIST_HEAD(&dequeued);
10761da177e4SLinus Torvalds 	spin_lock(&queue_lock);
1077f9e1aedcSNeilBrown 	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
10781da177e4SLinus Torvalds 		if (!cq->reader) {
1079f9e1aedcSNeilBrown 			cr = container_of(cq, struct cache_request, q);
10801da177e4SLinus Torvalds 			if (cr->item != ch)
10811da177e4SLinus Torvalds 				continue;
1082f9e1aedcSNeilBrown 			if (test_bit(CACHE_PENDING, &ch->flags))
1083f9e1aedcSNeilBrown 				/* Lost a race and it is pending again */
1084f9e1aedcSNeilBrown 				break;
10851da177e4SLinus Torvalds 			if (cr->readers != 0)
10864013edeaSNeilBrown 				continue;
1087f9e1aedcSNeilBrown 			list_move(&cr->q.list, &dequeued);
1088f9e1aedcSNeilBrown 		}
10891da177e4SLinus Torvalds 	spin_unlock(&queue_lock);
1090f9e1aedcSNeilBrown 	while (!list_empty(&dequeued)) {
1091f9e1aedcSNeilBrown 		cr = list_entry(dequeued.next, struct cache_request, q.list);
1092f9e1aedcSNeilBrown 		list_del(&cr->q.list);
1093baab935fSNeilBrown 		cache_put(cr->item, detail);
10941da177e4SLinus Torvalds 		kfree(cr->buf);
10951da177e4SLinus Torvalds 		kfree(cr);
10961da177e4SLinus Torvalds 	}
10971da177e4SLinus Torvalds }
10981da177e4SLinus Torvalds 
10991da177e4SLinus Torvalds /*
11001da177e4SLinus Torvalds  * Support routines for text-based upcalls.
11011da177e4SLinus Torvalds  * Fields are separated by spaces.
11021da177e4SLinus Torvalds  * Fields are either mangled to quote space, tab, newline and slosh with a slosh,
11031da177e4SLinus Torvalds  * or hexified with a leading \x
11041da177e4SLinus Torvalds  * Record is terminated with newline.
11051da177e4SLinus Torvalds  *
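 * For example, qword_add() writes the field "host name" as
 * "host\040name " and qword_addhex() writes the two bytes 0x0a 0x0b
 * as "\x0a0b ".
 *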
11061da177e4SLinus Torvalds  */
11071da177e4SLinus Torvalds 
11081da177e4SLinus Torvalds void qword_add(char **bpp, int *lp, char *str)
11091da177e4SLinus Torvalds {
11101da177e4SLinus Torvalds 	char *bp = *bpp;
11111da177e4SLinus Torvalds 	int len = *lp;
11121b2e122dSAndy Shevchenko 	int ret;
11131da177e4SLinus Torvalds 
11141da177e4SLinus Torvalds 	if (len < 0) return;
11151da177e4SLinus Torvalds 
111641416f23SRasmus Villemoes 	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
111741416f23SRasmus Villemoes 	if (ret >= len) {
111841416f23SRasmus Villemoes 		bp += len;
11191b2e122dSAndy Shevchenko 		len = -1;
112041416f23SRasmus Villemoes 	} else {
112141416f23SRasmus Villemoes 		bp += ret;
11221b2e122dSAndy Shevchenko 		len -= ret;
11231da177e4SLinus Torvalds 		*bp++ = ' ';
11241da177e4SLinus Torvalds 		len--;
11251da177e4SLinus Torvalds 	}
11261da177e4SLinus Torvalds 	*bpp = bp;
11271da177e4SLinus Torvalds 	*lp = len;
11281da177e4SLinus Torvalds }
112924c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(qword_add);
11301da177e4SLinus Torvalds 
11311da177e4SLinus Torvalds void qword_addhex(char **bpp, int *lp, char *buf, int blen)
11321da177e4SLinus Torvalds {
11331da177e4SLinus Torvalds 	char *bp = *bpp;
11341da177e4SLinus Torvalds 	int len = *lp;
11351da177e4SLinus Torvalds 
11361da177e4SLinus Torvalds 	if (len < 0) return;
11371da177e4SLinus Torvalds 
11381da177e4SLinus Torvalds 	if (len > 2) {
11391da177e4SLinus Torvalds 		*bp++ = '\\';
11401da177e4SLinus Torvalds 		*bp++ = 'x';
11411da177e4SLinus Torvalds 		len -= 2;
11421da177e4SLinus Torvalds 		while (blen && len >= 2) {
1143056785eaSAndy Shevchenko 			bp = hex_byte_pack(bp, *buf++);
11441da177e4SLinus Torvalds 			len -= 2;
11451da177e4SLinus Torvalds 			blen--;
11461da177e4SLinus Torvalds 		}
11471da177e4SLinus Torvalds 	}
11481da177e4SLinus Torvalds 	if (blen || len<1) len = -1;
11491da177e4SLinus Torvalds 	else {
11501da177e4SLinus Torvalds 		*bp++ = ' ';
11511da177e4SLinus Torvalds 		len--;
11521da177e4SLinus Torvalds 	}
11531da177e4SLinus Torvalds 	*bpp = bp;
11541da177e4SLinus Torvalds 	*lp = len;
11551da177e4SLinus Torvalds }
115624c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(qword_addhex);
11571da177e4SLinus Torvalds 
11581da177e4SLinus Torvalds static void warn_no_listener(struct cache_detail *detail)
11591da177e4SLinus Torvalds {
11601da177e4SLinus Torvalds 	if (detail->last_warn != detail->last_close) {
11611da177e4SLinus Torvalds 		detail->last_warn = detail->last_close;
11621da177e4SLinus Torvalds 		if (detail->warn_no_listener)
11632da8ca26STrond Myklebust 			detail->warn_no_listener(detail, detail->last_close != 0);
11641da177e4SLinus Torvalds 	}
11651da177e4SLinus Torvalds }
11661da177e4SLinus Torvalds 
116706497524SJ. Bruce Fields static bool cache_listeners_exist(struct cache_detail *detail)
116806497524SJ. Bruce Fields {
116964a38e84SDave Wysochanski 	if (atomic_read(&detail->writers))
117006497524SJ. Bruce Fields 		return true;
117106497524SJ. Bruce Fields 	if (detail->last_close == 0)
117206497524SJ. Bruce Fields 		/* This cache was never opened */
117306497524SJ. Bruce Fields 		return false;
117406497524SJ. Bruce Fields 	if (detail->last_close < seconds_since_boot() - 30)
117506497524SJ. Bruce Fields 		/*
117606497524SJ. Bruce Fields 		 * We allow for the possibility that someone might
117706497524SJ. Bruce Fields 		 * restart a userspace daemon without restarting the
117806497524SJ. Bruce Fields 		 * server; but after 30 seconds, we give up.
117906497524SJ. Bruce Fields 		 */
118006497524SJ. Bruce Fields 		 return false;
118106497524SJ. Bruce Fields 	return true;
118206497524SJ. Bruce Fields }
118306497524SJ. Bruce Fields 
11841da177e4SLinus Torvalds /*
1185bc74b4f5STrond Myklebust  * register an upcall request to user-space and queue it up for read() by the
1186bc74b4f5STrond Myklebust  * upcall daemon.
1187bc74b4f5STrond Myklebust  *
11881da177e4SLinus Torvalds  * Each request is at most one page long.
11891da177e4SLinus Torvalds  */
119065286b88STrond Myklebust static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
11911da177e4SLinus Torvalds {
11921da177e4SLinus Torvalds 	char *buf;
11931da177e4SLinus Torvalds 	struct cache_request *crq;
1194f9e1aedcSNeilBrown 	int ret = 0;
11951da177e4SLinus Torvalds 
1196013920ebSNeilBrown 	if (test_bit(CACHE_CLEANED, &h->flags))
1197013920ebSNeilBrown 		/* Too late to make an upcall */
1198013920ebSNeilBrown 		return -EAGAIN;
11991da177e4SLinus Torvalds 
12001da177e4SLinus Torvalds 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
12011da177e4SLinus Torvalds 	if (!buf)
12021da177e4SLinus Torvalds 		return -EAGAIN;
12031da177e4SLinus Torvalds 
12041da177e4SLinus Torvalds 	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
12051da177e4SLinus Torvalds 	if (!crq) {
12061da177e4SLinus Torvalds 		kfree(buf);
12071da177e4SLinus Torvalds 		return -EAGAIN;
12081da177e4SLinus Torvalds 	}
12091da177e4SLinus Torvalds 
12101da177e4SLinus Torvalds 	crq->q.reader = 0;
12111da177e4SLinus Torvalds 	crq->buf = buf;
1212d94af6deSStanislav Kinsbursky 	crq->len = 0;
12131da177e4SLinus Torvalds 	crq->readers = 0;
12141da177e4SLinus Torvalds 	spin_lock(&queue_lock);
1215a6ab1e81SNeilBrown 	if (test_bit(CACHE_PENDING, &h->flags)) {
1216a6ab1e81SNeilBrown 		crq->item = cache_get(h);
12171da177e4SLinus Torvalds 		list_add_tail(&crq->q.list, &detail->queue);
1218a6ab1e81SNeilBrown 	} else
1219f9e1aedcSNeilBrown 		/* Lost a race, no longer PENDING, so don't enqueue */
1220f9e1aedcSNeilBrown 		ret = -EAGAIN;
12211da177e4SLinus Torvalds 	spin_unlock(&queue_lock);
12221da177e4SLinus Torvalds 	wake_up(&queue_wait);
1223f9e1aedcSNeilBrown 	if (ret == -EAGAIN) {
1224f9e1aedcSNeilBrown 		kfree(buf);
1225f9e1aedcSNeilBrown 		kfree(crq);
1226f9e1aedcSNeilBrown 	}
1227f9e1aedcSNeilBrown 	return ret;
12281da177e4SLinus Torvalds }
122965286b88STrond Myklebust 
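/* Queue an upcall for @h unless one is already pending: the
 * test_and_set_bit() on CACHE_PENDING ensures at most one request is
 * queued per cache_head at a time.
 */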
123065286b88STrond Myklebust int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
123165286b88STrond Myklebust {
123265286b88STrond Myklebust 	if (test_and_set_bit(CACHE_PENDING, &h->flags))
123365286b88STrond Myklebust 		return 0;
123465286b88STrond Myklebust 	return cache_pipe_upcall(detail, h);
123565286b88STrond Myklebust }
1236bc74b4f5STrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
12371da177e4SLinus Torvalds 
123865286b88STrond Myklebust int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
123965286b88STrond Myklebust 				     struct cache_head *h)
124065286b88STrond Myklebust {
124165286b88STrond Myklebust 	if (!cache_listeners_exist(detail)) {
124265286b88STrond Myklebust 		warn_no_listener(detail);
124365286b88STrond Myklebust 		return -EINVAL;
124465286b88STrond Myklebust 	}
124565286b88STrond Myklebust 	return sunrpc_cache_pipe_upcall(detail, h);
124665286b88STrond Myklebust }
124765286b88STrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
124865286b88STrond Myklebust 
12491da177e4SLinus Torvalds /*
12501da177e4SLinus Torvalds  * parse a message from user-space and pass it
12511da177e4SLinus Torvalds  * to an appropriate cache
12521da177e4SLinus Torvalds  * Messages are, like requests, separated into fields by
12531da177e4SLinus Torvalds  * spaces, and dequoted as \xHEXSTRING or embedded \nnn octal
12541da177e4SLinus Torvalds  *
12551da177e4SLinus Torvalds  * Message is
12561da177e4SLinus Torvalds  *   reply cachename expiry key ... content....
12571da177e4SLinus Torvalds  *
12581da177e4SLinus Torvalds  * key and content are both parsed by cache
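 *
 * For example, qword_get() decodes "\x68656c6c6f" to "hello" and
 * "a\040b" to "a b".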
12591da177e4SLinus Torvalds  */
12601da177e4SLinus Torvalds 
12611da177e4SLinus Torvalds int qword_get(char **bpp, char *dest, int bufsize)
12621da177e4SLinus Torvalds {
12631da177e4SLinus Torvalds 	/* return bytes copied, or -1 on error */
12641da177e4SLinus Torvalds 	char *bp = *bpp;
12651da177e4SLinus Torvalds 	int len = 0;
12661da177e4SLinus Torvalds 
12671da177e4SLinus Torvalds 	while (*bp == ' ') bp++;
12681da177e4SLinus Torvalds 
12691da177e4SLinus Torvalds 	if (bp[0] == '\\' && bp[1] == 'x') {
12701da177e4SLinus Torvalds 		/* HEX STRING */
12711da177e4SLinus Torvalds 		bp += 2;
1272b7052cd7SStefan Hajnoczi 		while (len < bufsize - 1) {
1273e7f483eaSAndy Shevchenko 			int h, l;
1274e7f483eaSAndy Shevchenko 
1275e7f483eaSAndy Shevchenko 			h = hex_to_bin(bp[0]);
1276e7f483eaSAndy Shevchenko 			if (h < 0)
1277e7f483eaSAndy Shevchenko 				break;
1278e7f483eaSAndy Shevchenko 
1279e7f483eaSAndy Shevchenko 			l = hex_to_bin(bp[1]);
1280e7f483eaSAndy Shevchenko 			if (l < 0)
1281e7f483eaSAndy Shevchenko 				break;
1282e7f483eaSAndy Shevchenko 
1283e7f483eaSAndy Shevchenko 			*dest++ = (h << 4) | l;
1284e7f483eaSAndy Shevchenko 			bp += 2;
12851da177e4SLinus Torvalds 			len++;
12861da177e4SLinus Torvalds 		}
12871da177e4SLinus Torvalds 	} else {
12881da177e4SLinus Torvalds 		/* text with \nnn octal quoting */
12891da177e4SLinus Torvalds 		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
12901da177e4SLinus Torvalds 			if (*bp == '\\' &&
12911da177e4SLinus Torvalds 			    isodigit(bp[1]) && (bp[1] <= '3') &&
12921da177e4SLinus Torvalds 			    isodigit(bp[2]) &&
12931da177e4SLinus Torvalds 			    isodigit(bp[3])) {
12941da177e4SLinus Torvalds 				int byte = (*++bp -'0');
12951da177e4SLinus Torvalds 				bp++;
12961da177e4SLinus Torvalds 				byte = (byte << 3) | (*bp++ - '0');
12971da177e4SLinus Torvalds 				byte = (byte << 3) | (*bp++ - '0');
12981da177e4SLinus Torvalds 				*dest++ = byte;
12991da177e4SLinus Torvalds 				len++;
13001da177e4SLinus Torvalds 			} else {
13011da177e4SLinus Torvalds 				*dest++ = *bp++;
13021da177e4SLinus Torvalds 				len++;
13031da177e4SLinus Torvalds 			}
13041da177e4SLinus Torvalds 		}
13051da177e4SLinus Torvalds 	}
13061da177e4SLinus Torvalds 
13071da177e4SLinus Torvalds 	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
13081da177e4SLinus Torvalds 		return -1;
13091da177e4SLinus Torvalds 	while (*bp == ' ') bp++;
13101da177e4SLinus Torvalds 	*bpp = bp;
13111da177e4SLinus Torvalds 	*dest = '\0';
13121da177e4SLinus Torvalds 	return len;
13131da177e4SLinus Torvalds }
131424c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(qword_get);
13151da177e4SLinus Torvalds 
13161da177e4SLinus Torvalds 
13171da177e4SLinus Torvalds /*
13186489a8f4SKinglong Mee  * support /proc/net/rpc/$CACHENAME/content
13191da177e4SLinus Torvalds  * as a seqfile.
13201da177e4SLinus Torvalds  * We call ->cache_show passing NULL for the item to
13211da177e4SLinus Torvalds  * get a header, then pass each real item in the cache
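 *
 * The seq_file position encodes the hash-chain index in the upper
 * 32 bits and the index of the entry within that chain in the lower
 * 32 bits.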
13221da177e4SLinus Torvalds  */
13231da177e4SLinus Torvalds 
1324ae74136bSTrond Myklebust static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
13251da177e4SLinus Torvalds {
13261da177e4SLinus Torvalds 	loff_t n = *pos;
132795c96174SEric Dumazet 	unsigned int hash, entry;
13281da177e4SLinus Torvalds 	struct cache_head *ch;
13299936f2aeSKinglong Mee 	struct cache_detail *cd = m->private;
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds 	if (!n--)
13321da177e4SLinus Torvalds 		return SEQ_START_TOKEN;
13331da177e4SLinus Torvalds 	hash = n >> 32;
13341da177e4SLinus Torvalds 	entry = n & ((1LL<<32) - 1);
13351da177e4SLinus Torvalds 
1336ae74136bSTrond Myklebust 	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
13371da177e4SLinus Torvalds 		if (!entry--)
13381da177e4SLinus Torvalds 			return ch;
13391da177e4SLinus Torvalds 	n &= ~((1LL<<32) - 1);
13401da177e4SLinus Torvalds 	do {
13411da177e4SLinus Torvalds 		hash++;
13421da177e4SLinus Torvalds 		n += 1LL<<32;
13431da177e4SLinus Torvalds 	} while(hash < cd->hash_size &&
1344129e5824SKinglong Mee 		hlist_empty(&cd->hash_table[hash]));
13451da177e4SLinus Torvalds 	if (hash >= cd->hash_size)
13461da177e4SLinus Torvalds 		return NULL;
13471da177e4SLinus Torvalds 	*pos = n+1;
1348ae74136bSTrond Myklebust 	return hlist_entry_safe(rcu_dereference_raw(
1349ae74136bSTrond Myklebust 				hlist_first_rcu(&cd->hash_table[hash])),
1350129e5824SKinglong Mee 				struct cache_head, cache_list);
13511da177e4SLinus Torvalds }
1352ae74136bSTrond Myklebust 
1353d48cf356STrond Myklebust static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
13541da177e4SLinus Torvalds {
13551da177e4SLinus Torvalds 	struct cache_head *ch = p;
13561da177e4SLinus Torvalds 	int hash = (*pos >> 32);
13579936f2aeSKinglong Mee 	struct cache_detail *cd = m->private;
13581da177e4SLinus Torvalds 
13591da177e4SLinus Torvalds 	if (p == SEQ_START_TOKEN)
13601da177e4SLinus Torvalds 		hash = 0;
1361129e5824SKinglong Mee 	else if (ch->cache_list.next == NULL) {
13621da177e4SLinus Torvalds 		hash++;
13631da177e4SLinus Torvalds 		*pos += 1LL<<32;
13641da177e4SLinus Torvalds 	} else {
13651da177e4SLinus Torvalds 		++*pos;
1366ae74136bSTrond Myklebust 		return hlist_entry_safe(rcu_dereference_raw(
1367ae74136bSTrond Myklebust 					hlist_next_rcu(&ch->cache_list)),
1368129e5824SKinglong Mee 					struct cache_head, cache_list);
13691da177e4SLinus Torvalds 	}
13701da177e4SLinus Torvalds 	*pos &= ~((1LL<<32) - 1);
13711da177e4SLinus Torvalds 	while (hash < cd->hash_size &&
1372129e5824SKinglong Mee 	       hlist_empty(&cd->hash_table[hash])) {
13731da177e4SLinus Torvalds 		hash++;
13741da177e4SLinus Torvalds 		*pos += 1LL<<32;
13751da177e4SLinus Torvalds 	}
13761da177e4SLinus Torvalds 	if (hash >= cd->hash_size)
13771da177e4SLinus Torvalds 		return NULL;
13781da177e4SLinus Torvalds 	++*pos;
1379ae74136bSTrond Myklebust 	return hlist_entry_safe(rcu_dereference_raw(
1380ae74136bSTrond Myklebust 				hlist_first_rcu(&cd->hash_table[hash])),
1381129e5824SKinglong Mee 				struct cache_head, cache_list);
13821da177e4SLinus Torvalds }
13831da177e4SLinus Torvalds 
1384ae74136bSTrond Myklebust void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1385ae74136bSTrond Myklebust 	__acquires(RCU)
1386ae74136bSTrond Myklebust {
1387ae74136bSTrond Myklebust 	rcu_read_lock();
1388ae74136bSTrond Myklebust 	return __cache_seq_start(m, pos);
1389ae74136bSTrond Myklebust }
1390ae74136bSTrond Myklebust EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1391ae74136bSTrond Myklebust 
1392ae74136bSTrond Myklebust void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1393ae74136bSTrond Myklebust {
1394ae74136bSTrond Myklebust 	return cache_seq_next(file, p, pos);
1395ae74136bSTrond Myklebust }
1396ae74136bSTrond Myklebust EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1397ae74136bSTrond Myklebust 
1398ae74136bSTrond Myklebust void cache_seq_stop_rcu(struct seq_file *m, void *p)
1399ae74136bSTrond Myklebust 	__releases(RCU)
1400ae74136bSTrond Myklebust {
1401ae74136bSTrond Myklebust 	rcu_read_unlock();
1402ae74136bSTrond Myklebust }
1403ae74136bSTrond Myklebust EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1404ae74136bSTrond Myklebust 
14051da177e4SLinus Torvalds static int c_show(struct seq_file *m, void *p)
14061da177e4SLinus Torvalds {
14071da177e4SLinus Torvalds 	struct cache_head *cp = p;
14089936f2aeSKinglong Mee 	struct cache_detail *cd = m->private;
14091da177e4SLinus Torvalds 
14101da177e4SLinus Torvalds 	if (p == SEQ_START_TOKEN)
14111da177e4SLinus Torvalds 		return cd->cache_show(m, cd, NULL);
14121da177e4SLinus Torvalds 
14131da177e4SLinus Torvalds 	ifdebug(CACHE)
1414f559935eSArnd Bergmann 		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
1415c5b29f88SNeilBrown 			   convert_to_wallclock(cp->expiry_time),
14162c935bc5SPeter Zijlstra 			   kref_read(&cp->ref), cp->flags);
14171da177e4SLinus Torvalds 	cache_get(cp);
14181da177e4SLinus Torvalds 	if (cache_check(cd, cp, NULL))
14191da177e4SLinus Torvalds 		/* cache_check does a cache_put on failure */
14201da177e4SLinus Torvalds 		seq_printf(m, "# ");
1421200724a7SNeilBrown 	else {
1422200724a7SNeilBrown 		if (cache_is_expired(cd, cp))
1423200724a7SNeilBrown 			seq_printf(m, "# ");
14241da177e4SLinus Torvalds 		cache_put(cp, cd);
1425200724a7SNeilBrown 	}
14261da177e4SLinus Torvalds 
14271da177e4SLinus Torvalds 	return cd->cache_show(m, cd, cp);
14281da177e4SLinus Torvalds }
14291da177e4SLinus Torvalds 
143056b3d975SPhilippe De Muyter static const struct seq_operations cache_content_op = {
1431d48cf356STrond Myklebust 	.start	= cache_seq_start_rcu,
1432d48cf356STrond Myklebust 	.next	= cache_seq_next_rcu,
1433d48cf356STrond Myklebust 	.stop	= cache_seq_stop_rcu,
14341da177e4SLinus Torvalds 	.show	= c_show,
14351da177e4SLinus Torvalds };
14361da177e4SLinus Torvalds 
1437173912a6STrond Myklebust static int content_open(struct inode *inode, struct file *file,
1438173912a6STrond Myklebust 			struct cache_detail *cd)
14391da177e4SLinus Torvalds {
14409936f2aeSKinglong Mee 	struct seq_file *seq;
14419936f2aeSKinglong Mee 	int err;
14421da177e4SLinus Torvalds 
1443f7e86ab9STrond Myklebust 	if (!cd || !try_module_get(cd->owner))
1444f7e86ab9STrond Myklebust 		return -EACCES;
14459936f2aeSKinglong Mee 
14469936f2aeSKinglong Mee 	err = seq_open(file, &cache_content_op);
14479936f2aeSKinglong Mee 	if (err) {
1448a5990ea1SLi Zefan 		module_put(cd->owner);
14499936f2aeSKinglong Mee 		return err;
1450a5990ea1SLi Zefan 	}
14511da177e4SLinus Torvalds 
14529936f2aeSKinglong Mee 	seq = file->private_data;
14539936f2aeSKinglong Mee 	seq->private = cd;
1454ec931035SPavel Emelyanov 	return 0;
14551da177e4SLinus Torvalds }
14561da177e4SLinus Torvalds 
1457f7e86ab9STrond Myklebust static int content_release(struct inode *inode, struct file *file,
1458f7e86ab9STrond Myklebust 		struct cache_detail *cd)
1459f7e86ab9STrond Myklebust {
14609936f2aeSKinglong Mee 	int ret = seq_release(inode, file);
1461f7e86ab9STrond Myklebust 	module_put(cd->owner);
1462f7e86ab9STrond Myklebust 	return ret;
1463f7e86ab9STrond Myklebust }
1464f7e86ab9STrond Myklebust 
1465f7e86ab9STrond Myklebust static int open_flush(struct inode *inode, struct file *file,
1466f7e86ab9STrond Myklebust 			struct cache_detail *cd)
1467f7e86ab9STrond Myklebust {
1468f7e86ab9STrond Myklebust 	if (!cd || !try_module_get(cd->owner))
1469f7e86ab9STrond Myklebust 		return -EACCES;
1470f7e86ab9STrond Myklebust 	return nonseekable_open(inode, file);
1471f7e86ab9STrond Myklebust }
1472f7e86ab9STrond Myklebust 
1473f7e86ab9STrond Myklebust static int release_flush(struct inode *inode, struct file *file,
1474f7e86ab9STrond Myklebust 			struct cache_detail *cd)
1475f7e86ab9STrond Myklebust {
1476f7e86ab9STrond Myklebust 	module_put(cd->owner);
1477f7e86ab9STrond Myklebust 	return 0;
1478f7e86ab9STrond Myklebust }
14791da177e4SLinus Torvalds 
14801da177e4SLinus Torvalds static ssize_t read_flush(struct file *file, char __user *buf,
1481173912a6STrond Myklebust 			  size_t count, loff_t *ppos,
1482173912a6STrond Myklebust 			  struct cache_detail *cd)
14831da177e4SLinus Torvalds {
1484212ba906SSasha Levin 	char tbuf[22];
148501b2969aSChuck Lever 	size_t len;
14861da177e4SLinus Torvalds 
1487f559935eSArnd Bergmann 	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
14888ccc8691SKinglong Mee 			convert_to_wallclock(cd->flush_time));
14898ccc8691SKinglong Mee 	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
14901da177e4SLinus Torvalds }
14911da177e4SLinus Torvalds 
14921da177e4SLinus Torvalds static ssize_t write_flush(struct file *file, const char __user *buf,
1493173912a6STrond Myklebust 			   size_t count, loff_t *ppos,
1494173912a6STrond Myklebust 			   struct cache_detail *cd)
14951da177e4SLinus Torvalds {
14961da177e4SLinus Torvalds 	char tbuf[20];
14973b68e6eeSNeilBrown 	char *ep;
1498f559935eSArnd Bergmann 	time64_t now;
1499c5b29f88SNeilBrown 
15001da177e4SLinus Torvalds 	if (*ppos || count > sizeof(tbuf)-1)
15011da177e4SLinus Torvalds 		return -EINVAL;
15021da177e4SLinus Torvalds 	if (copy_from_user(tbuf, buf, count))
15031da177e4SLinus Torvalds 		return -EFAULT;
15041da177e4SLinus Torvalds 	tbuf[count] = 0;
1505c5b29f88SNeilBrown 	simple_strtoul(tbuf, &ep, 0);
15061da177e4SLinus Torvalds 	if (*ep && *ep != '\n')
15071da177e4SLinus Torvalds 		return -EINVAL;
15083b68e6eeSNeilBrown 	/* Note that while we check that 'buf' holds a valid number,
15093b68e6eeSNeilBrown 	 * we always ignore the value and just flush everything.
15103b68e6eeSNeilBrown 	 * Making use of the number leads to races.
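	 * From user space, e.g. "echo 1 > /proc/net/rpc/<cachename>/flush"
	 * therefore flushes every entry regardless of the value written.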
151177862036SNeil Brown 	 */
15123b68e6eeSNeilBrown 
15133b68e6eeSNeilBrown 	now = seconds_since_boot();
15143b68e6eeSNeilBrown 	/* Always flush everything, so behave like cache_purge()
15153b68e6eeSNeilBrown 	 * Do this by advancing flush_time to the current time,
15163b68e6eeSNeilBrown 	 * or by one second if it has already reached the current time.
15173b68e6eeSNeilBrown 	 * Newly added cache entries will always have ->last_refresh greater
15183b68e6eeSNeilBrown 	 * than ->flush_time, so they don't get flushed prematurely.
15193b68e6eeSNeilBrown 	 */
15203b68e6eeSNeilBrown 
152177862036SNeil Brown 	if (cd->flush_time >= now)
152277862036SNeil Brown 		now = cd->flush_time + 1;
152377862036SNeil Brown 
15243b68e6eeSNeilBrown 	cd->flush_time = now;
15253b68e6eeSNeilBrown 	cd->nextcheck = now;
15261da177e4SLinus Torvalds 	cache_flush();
15271da177e4SLinus Torvalds 
1528f69d6d8eSJeff Layton 	if (cd->flush)
1529f69d6d8eSJeff Layton 		cd->flush();
1530f69d6d8eSJeff Layton 
15311da177e4SLinus Torvalds 	*ppos += count;
15321da177e4SLinus Torvalds 	return count;
15331da177e4SLinus Torvalds }
15341da177e4SLinus Torvalds 
1535173912a6STrond Myklebust static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1536173912a6STrond Myklebust 				 size_t count, loff_t *ppos)
1537173912a6STrond Myklebust {
1538d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1539173912a6STrond Myklebust 
1540173912a6STrond Myklebust 	return cache_read(filp, buf, count, ppos, cd);
1541173912a6STrond Myklebust }
1542173912a6STrond Myklebust 
1543173912a6STrond Myklebust static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1544173912a6STrond Myklebust 				  size_t count, loff_t *ppos)
1545173912a6STrond Myklebust {
1546d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1547173912a6STrond Myklebust 
1548173912a6STrond Myklebust 	return cache_write(filp, buf, count, ppos, cd);
1549173912a6STrond Myklebust }
1550173912a6STrond Myklebust 
1551ade994f4SAl Viro static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1552173912a6STrond Myklebust {
1553d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1554173912a6STrond Myklebust 
1555173912a6STrond Myklebust 	return cache_poll(filp, wait, cd);
1556173912a6STrond Myklebust }
1557173912a6STrond Myklebust 
1558d79b6f4dSFrederic Weisbecker static long cache_ioctl_procfs(struct file *filp,
1559173912a6STrond Myklebust 			       unsigned int cmd, unsigned long arg)
1560173912a6STrond Myklebust {
1561496ad9aaSAl Viro 	struct inode *inode = file_inode(filp);
1562d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(inode);
1563173912a6STrond Myklebust 
1564a6f8dbc6SArnd Bergmann 	return cache_ioctl(inode, filp, cmd, arg, cd);
1565173912a6STrond Myklebust }
1566173912a6STrond Myklebust 
1567173912a6STrond Myklebust static int cache_open_procfs(struct inode *inode, struct file *filp)
1568173912a6STrond Myklebust {
1569d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(inode);
1570173912a6STrond Myklebust 
1571173912a6STrond Myklebust 	return cache_open(inode, filp, cd);
1572173912a6STrond Myklebust }
1573173912a6STrond Myklebust 
1574173912a6STrond Myklebust static int cache_release_procfs(struct inode *inode, struct file *filp)
1575173912a6STrond Myklebust {
1576d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(inode);
1577173912a6STrond Myklebust 
1578173912a6STrond Myklebust 	return cache_release(inode, filp, cd);
1579173912a6STrond Myklebust }
1580173912a6STrond Myklebust 
158197a32539SAlexey Dobriyan static const struct proc_ops cache_channel_proc_ops = {
158297a32539SAlexey Dobriyan 	.proc_lseek	= no_llseek,
158397a32539SAlexey Dobriyan 	.proc_read	= cache_read_procfs,
158497a32539SAlexey Dobriyan 	.proc_write	= cache_write_procfs,
158597a32539SAlexey Dobriyan 	.proc_poll	= cache_poll_procfs,
158697a32539SAlexey Dobriyan 	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
158797a32539SAlexey Dobriyan 	.proc_open	= cache_open_procfs,
158897a32539SAlexey Dobriyan 	.proc_release	= cache_release_procfs,
15891da177e4SLinus Torvalds };
1590173912a6STrond Myklebust 
1591173912a6STrond Myklebust static int content_open_procfs(struct inode *inode, struct file *filp)
1592173912a6STrond Myklebust {
1593d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(inode);
1594173912a6STrond Myklebust 
1595173912a6STrond Myklebust 	return content_open(inode, filp, cd);
1596173912a6STrond Myklebust }
1597173912a6STrond Myklebust 
1598f7e86ab9STrond Myklebust static int content_release_procfs(struct inode *inode, struct file *filp)
1599f7e86ab9STrond Myklebust {
1600d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(inode);
1601f7e86ab9STrond Myklebust 
1602f7e86ab9STrond Myklebust 	return content_release(inode, filp, cd);
1603f7e86ab9STrond Myklebust }
1604f7e86ab9STrond Myklebust 
160597a32539SAlexey Dobriyan static const struct proc_ops content_proc_ops = {
160697a32539SAlexey Dobriyan 	.proc_open	= content_open_procfs,
160797a32539SAlexey Dobriyan 	.proc_read	= seq_read,
160897a32539SAlexey Dobriyan 	.proc_lseek	= seq_lseek,
160997a32539SAlexey Dobriyan 	.proc_release	= content_release_procfs,
1610173912a6STrond Myklebust };
1611173912a6STrond Myklebust 
1612f7e86ab9STrond Myklebust static int open_flush_procfs(struct inode *inode, struct file *filp)
1613f7e86ab9STrond Myklebust {
1614d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(inode);
1615f7e86ab9STrond Myklebust 
1616f7e86ab9STrond Myklebust 	return open_flush(inode, filp, cd);
1617f7e86ab9STrond Myklebust }
1618f7e86ab9STrond Myklebust 
1619f7e86ab9STrond Myklebust static int release_flush_procfs(struct inode *inode, struct file *filp)
1620f7e86ab9STrond Myklebust {
1621d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(inode);
1622f7e86ab9STrond Myklebust 
1623f7e86ab9STrond Myklebust 	return release_flush(inode, filp, cd);
1624f7e86ab9STrond Myklebust }
1625f7e86ab9STrond Myklebust 
1626173912a6STrond Myklebust static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1627173912a6STrond Myklebust 			    size_t count, loff_t *ppos)
1628173912a6STrond Myklebust {
1629d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1630173912a6STrond Myklebust 
1631173912a6STrond Myklebust 	return read_flush(filp, buf, count, ppos, cd);
1632173912a6STrond Myklebust }
1633173912a6STrond Myklebust 
1634173912a6STrond Myklebust static ssize_t write_flush_procfs(struct file *filp,
1635173912a6STrond Myklebust 				  const char __user *buf,
1636173912a6STrond Myklebust 				  size_t count, loff_t *ppos)
1637173912a6STrond Myklebust {
1638d9dda78bSAl Viro 	struct cache_detail *cd = PDE_DATA(file_inode(filp));
1639173912a6STrond Myklebust 
1640173912a6STrond Myklebust 	return write_flush(filp, buf, count, ppos, cd);
1641173912a6STrond Myklebust }
1642173912a6STrond Myklebust 
164397a32539SAlexey Dobriyan static const struct proc_ops cache_flush_proc_ops = {
164497a32539SAlexey Dobriyan 	.proc_open	= open_flush_procfs,
164597a32539SAlexey Dobriyan 	.proc_read	= read_flush_procfs,
164697a32539SAlexey Dobriyan 	.proc_write	= write_flush_procfs,
164797a32539SAlexey Dobriyan 	.proc_release	= release_flush_procfs,
164897a32539SAlexey Dobriyan 	.proc_lseek	= no_llseek,
1649173912a6STrond Myklebust };
1650173912a6STrond Myklebust 
1651863d7d9cSKinglong Mee static void remove_cache_proc_entries(struct cache_detail *cd)
1652173912a6STrond Myklebust {
1653863d7d9cSKinglong Mee 	if (cd->procfs) {
1654863d7d9cSKinglong Mee 		proc_remove(cd->procfs);
1655863d7d9cSKinglong Mee 		cd->procfs = NULL;
1656863d7d9cSKinglong Mee 	}
1657173912a6STrond Myklebust }
1658173912a6STrond Myklebust 
1659173912a6STrond Myklebust #ifdef CONFIG_PROC_FS
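/* Create /proc/net/rpc/<cd->name>/ containing "flush", plus "channel"
 * when the cache does upcalls (cache_request/cache_parse) and "content"
 * when it can be dumped (cache_show).
 */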
1660593ce16bSPavel Emelyanov static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1661173912a6STrond Myklebust {
1662173912a6STrond Myklebust 	struct proc_dir_entry *p;
16634f42d0d5SPavel Emelyanov 	struct sunrpc_net *sn;
1664173912a6STrond Myklebust 
16654f42d0d5SPavel Emelyanov 	sn = net_generic(net, sunrpc_net_id);
1666863d7d9cSKinglong Mee 	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1667863d7d9cSKinglong Mee 	if (cd->procfs == NULL)
1668173912a6STrond Myklebust 		goto out_nomem;
1669173912a6STrond Myklebust 
1670d6444062SJoe Perches 	p = proc_create_data("flush", S_IFREG | 0600,
167197a32539SAlexey Dobriyan 			     cd->procfs, &cache_flush_proc_ops, cd);
1672173912a6STrond Myklebust 	if (p == NULL)
1673173912a6STrond Myklebust 		goto out_nomem;
1674173912a6STrond Myklebust 
16752d438338SStanislav Kinsbursky 	if (cd->cache_request || cd->cache_parse) {
1676d6444062SJoe Perches 		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
167797a32539SAlexey Dobriyan 				     &cache_channel_proc_ops, cd);
1678173912a6STrond Myklebust 		if (p == NULL)
1679173912a6STrond Myklebust 			goto out_nomem;
1680173912a6STrond Myklebust 	}
1681173912a6STrond Myklebust 	if (cd->cache_show) {
1682d6444062SJoe Perches 		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
168397a32539SAlexey Dobriyan 				     &content_proc_ops, cd);
1684173912a6STrond Myklebust 		if (p == NULL)
1685173912a6STrond Myklebust 			goto out_nomem;
1686173912a6STrond Myklebust 	}
1687173912a6STrond Myklebust 	return 0;
1688173912a6STrond Myklebust out_nomem:
1689863d7d9cSKinglong Mee 	remove_cache_proc_entries(cd);
1690173912a6STrond Myklebust 	return -ENOMEM;
1691173912a6STrond Myklebust }
1692173912a6STrond Myklebust #else /* CONFIG_PROC_FS */
1693593ce16bSPavel Emelyanov static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1694173912a6STrond Myklebust {
1695173912a6STrond Myklebust 	return 0;
1696173912a6STrond Myklebust }
1697173912a6STrond Myklebust #endif
1698173912a6STrond Myklebust 
16998eab945cSArtem Bityutskiy void __init cache_initialize(void)
17008eab945cSArtem Bityutskiy {
1701203b42f7STejun Heo 	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
17028eab945cSArtem Bityutskiy }
17038eab945cSArtem Bityutskiy 
1704593ce16bSPavel Emelyanov int cache_register_net(struct cache_detail *cd, struct net *net)
1705173912a6STrond Myklebust {
1706173912a6STrond Myklebust 	int ret;
1707173912a6STrond Myklebust 
1708173912a6STrond Myklebust 	sunrpc_init_cache_detail(cd);
1709593ce16bSPavel Emelyanov 	ret = create_cache_proc_entries(cd, net);
1710173912a6STrond Myklebust 	if (ret)
1711173912a6STrond Myklebust 		sunrpc_destroy_cache_detail(cd);
1712173912a6STrond Myklebust 	return ret;
1713173912a6STrond Myklebust }
1714f5c8593bSStanislav Kinsbursky EXPORT_SYMBOL_GPL(cache_register_net);
1715593ce16bSPavel Emelyanov 
1716593ce16bSPavel Emelyanov void cache_unregister_net(struct cache_detail *cd, struct net *net)
1717593ce16bSPavel Emelyanov {
1718863d7d9cSKinglong Mee 	remove_cache_proc_entries(cd);
1719593ce16bSPavel Emelyanov 	sunrpc_destroy_cache_detail(cd);
1720593ce16bSPavel Emelyanov }
1721f5c8593bSStanislav Kinsbursky EXPORT_SYMBOL_GPL(cache_unregister_net);
1722593ce16bSPavel Emelyanov 
1723d34971a6SBhumika Goyal struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1724173912a6STrond Myklebust {
17250a402d5aSStanislav Kinsbursky 	struct cache_detail *cd;
1726129e5824SKinglong Mee 	int i;
17270a402d5aSStanislav Kinsbursky 
17280a402d5aSStanislav Kinsbursky 	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
17290a402d5aSStanislav Kinsbursky 	if (cd == NULL)
17300a402d5aSStanislav Kinsbursky 		return ERR_PTR(-ENOMEM);
17310a402d5aSStanislav Kinsbursky 
17326396bb22SKees Cook 	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
17330a402d5aSStanislav Kinsbursky 				 GFP_KERNEL);
17340a402d5aSStanislav Kinsbursky 	if (cd->hash_table == NULL) {
17350a402d5aSStanislav Kinsbursky 		kfree(cd);
17360a402d5aSStanislav Kinsbursky 		return ERR_PTR(-ENOMEM);
1737173912a6STrond Myklebust 	}
1738129e5824SKinglong Mee 
1739129e5824SKinglong Mee 	for (i = 0; i < cd->hash_size; i++)
1740129e5824SKinglong Mee 		INIT_HLIST_HEAD(&cd->hash_table[i]);
17410a402d5aSStanislav Kinsbursky 	cd->net = net;
17420a402d5aSStanislav Kinsbursky 	return cd;
17430a402d5aSStanislav Kinsbursky }
17440a402d5aSStanislav Kinsbursky EXPORT_SYMBOL_GPL(cache_create_net);
17450a402d5aSStanislav Kinsbursky 
17460a402d5aSStanislav Kinsbursky void cache_destroy_net(struct cache_detail *cd, struct net *net)
17470a402d5aSStanislav Kinsbursky {
17480a402d5aSStanislav Kinsbursky 	kfree(cd->hash_table);
17490a402d5aSStanislav Kinsbursky 	kfree(cd);
17500a402d5aSStanislav Kinsbursky }
17510a402d5aSStanislav Kinsbursky EXPORT_SYMBOL_GPL(cache_destroy_net);
17528854e82dSTrond Myklebust 
17538854e82dSTrond Myklebust static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
17548854e82dSTrond Myklebust 				 size_t count, loff_t *ppos)
17558854e82dSTrond Myklebust {
1756496ad9aaSAl Viro 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
17578854e82dSTrond Myklebust 
17588854e82dSTrond Myklebust 	return cache_read(filp, buf, count, ppos, cd);
17598854e82dSTrond Myklebust }
17608854e82dSTrond Myklebust 
17618854e82dSTrond Myklebust static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
17628854e82dSTrond Myklebust 				  size_t count, loff_t *ppos)
17638854e82dSTrond Myklebust {
1764496ad9aaSAl Viro 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
17658854e82dSTrond Myklebust 
17668854e82dSTrond Myklebust 	return cache_write(filp, buf, count, ppos, cd);
17678854e82dSTrond Myklebust }
17688854e82dSTrond Myklebust 
1769ade994f4SAl Viro static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
17708854e82dSTrond Myklebust {
1771496ad9aaSAl Viro 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
17728854e82dSTrond Myklebust 
17738854e82dSTrond Myklebust 	return cache_poll(filp, wait, cd);
17748854e82dSTrond Myklebust }
17758854e82dSTrond Myklebust 
17769918ff26SFrederic Weisbecker static long cache_ioctl_pipefs(struct file *filp,
17778854e82dSTrond Myklebust 			      unsigned int cmd, unsigned long arg)
17788854e82dSTrond Myklebust {
1779496ad9aaSAl Viro 	struct inode *inode = file_inode(filp);
17808854e82dSTrond Myklebust 	struct cache_detail *cd = RPC_I(inode)->private;
17818854e82dSTrond Myklebust 
1782a6f8dbc6SArnd Bergmann 	return cache_ioctl(inode, filp, cmd, arg, cd);
17838854e82dSTrond Myklebust }
17848854e82dSTrond Myklebust 
17858854e82dSTrond Myklebust static int cache_open_pipefs(struct inode *inode, struct file *filp)
17868854e82dSTrond Myklebust {
17878854e82dSTrond Myklebust 	struct cache_detail *cd = RPC_I(inode)->private;
17888854e82dSTrond Myklebust 
17898854e82dSTrond Myklebust 	return cache_open(inode, filp, cd);
17908854e82dSTrond Myklebust }
17918854e82dSTrond Myklebust 
17928854e82dSTrond Myklebust static int cache_release_pipefs(struct inode *inode, struct file *filp)
17938854e82dSTrond Myklebust {
17948854e82dSTrond Myklebust 	struct cache_detail *cd = RPC_I(inode)->private;
17958854e82dSTrond Myklebust 
17968854e82dSTrond Myklebust 	return cache_release(inode, filp, cd);
17978854e82dSTrond Myklebust }
17988854e82dSTrond Myklebust 
17998854e82dSTrond Myklebust const struct file_operations cache_file_operations_pipefs = {
18008854e82dSTrond Myklebust 	.owner		= THIS_MODULE,
18018854e82dSTrond Myklebust 	.llseek		= no_llseek,
18028854e82dSTrond Myklebust 	.read		= cache_read_pipefs,
18038854e82dSTrond Myklebust 	.write		= cache_write_pipefs,
18048854e82dSTrond Myklebust 	.poll		= cache_poll_pipefs,
18059918ff26SFrederic Weisbecker 	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
18068854e82dSTrond Myklebust 	.open		= cache_open_pipefs,
18078854e82dSTrond Myklebust 	.release	= cache_release_pipefs,
18088854e82dSTrond Myklebust };
18098854e82dSTrond Myklebust 
18108854e82dSTrond Myklebust static int content_open_pipefs(struct inode *inode, struct file *filp)
18118854e82dSTrond Myklebust {
18128854e82dSTrond Myklebust 	struct cache_detail *cd = RPC_I(inode)->private;
18138854e82dSTrond Myklebust 
18148854e82dSTrond Myklebust 	return content_open(inode, filp, cd);
18158854e82dSTrond Myklebust }
18168854e82dSTrond Myklebust 
1817f7e86ab9STrond Myklebust static int content_release_pipefs(struct inode *inode, struct file *filp)
1818f7e86ab9STrond Myklebust {
1819f7e86ab9STrond Myklebust 	struct cache_detail *cd = RPC_I(inode)->private;
1820f7e86ab9STrond Myklebust 
1821f7e86ab9STrond Myklebust 	return content_release(inode, filp, cd);
1822f7e86ab9STrond Myklebust }
1823f7e86ab9STrond Myklebust 
18248854e82dSTrond Myklebust const struct file_operations content_file_operations_pipefs = {
18258854e82dSTrond Myklebust 	.open		= content_open_pipefs,
18268854e82dSTrond Myklebust 	.read		= seq_read,
18278854e82dSTrond Myklebust 	.llseek		= seq_lseek,
1828f7e86ab9STrond Myklebust 	.release	= content_release_pipefs,
18298854e82dSTrond Myklebust };
18308854e82dSTrond Myklebust 
1831f7e86ab9STrond Myklebust static int open_flush_pipefs(struct inode *inode, struct file *filp)
1832f7e86ab9STrond Myklebust {
1833f7e86ab9STrond Myklebust 	struct cache_detail *cd = RPC_I(inode)->private;
1834f7e86ab9STrond Myklebust 
1835f7e86ab9STrond Myklebust 	return open_flush(inode, filp, cd);
1836f7e86ab9STrond Myklebust }
1837f7e86ab9STrond Myklebust 
1838f7e86ab9STrond Myklebust static int release_flush_pipefs(struct inode *inode, struct file *filp)
1839f7e86ab9STrond Myklebust {
1840f7e86ab9STrond Myklebust 	struct cache_detail *cd = RPC_I(inode)->private;
1841f7e86ab9STrond Myklebust 
1842f7e86ab9STrond Myklebust 	return release_flush(inode, filp, cd);
1843f7e86ab9STrond Myklebust }
1844f7e86ab9STrond Myklebust 
18458854e82dSTrond Myklebust static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
18468854e82dSTrond Myklebust 			    size_t count, loff_t *ppos)
18478854e82dSTrond Myklebust {
1848496ad9aaSAl Viro 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
18498854e82dSTrond Myklebust 
18508854e82dSTrond Myklebust 	return read_flush(filp, buf, count, ppos, cd);
18518854e82dSTrond Myklebust }
18528854e82dSTrond Myklebust 
18538854e82dSTrond Myklebust static ssize_t write_flush_pipefs(struct file *filp,
18548854e82dSTrond Myklebust 				  const char __user *buf,
18558854e82dSTrond Myklebust 				  size_t count, loff_t *ppos)
18568854e82dSTrond Myklebust {
1857496ad9aaSAl Viro 	struct cache_detail *cd = RPC_I(file_inode(filp))->private;
18588854e82dSTrond Myklebust 
18598854e82dSTrond Myklebust 	return write_flush(filp, buf, count, ppos, cd);
18608854e82dSTrond Myklebust }
18618854e82dSTrond Myklebust 
18628854e82dSTrond Myklebust const struct file_operations cache_flush_operations_pipefs = {
1863f7e86ab9STrond Myklebust 	.open		= open_flush_pipefs,
18648854e82dSTrond Myklebust 	.read		= read_flush_pipefs,
18658854e82dSTrond Myklebust 	.write		= write_flush_pipefs,
1866f7e86ab9STrond Myklebust 	.release	= release_flush_pipefs,
18676038f373SArnd Bergmann 	.llseek		= no_llseek,
18688854e82dSTrond Myklebust };
18698854e82dSTrond Myklebust 
18708854e82dSTrond Myklebust int sunrpc_cache_register_pipefs(struct dentry *parent,
187164f1426fSAl Viro 				 const char *name, umode_t umode,
18728854e82dSTrond Myklebust 				 struct cache_detail *cd)
18738854e82dSTrond Myklebust {
1874a95e691fSAl Viro 	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1875a95e691fSAl Viro 	if (IS_ERR(dir))
1876a95e691fSAl Viro 		return PTR_ERR(dir);
1877863d7d9cSKinglong Mee 	cd->pipefs = dir;
1878a95e691fSAl Viro 	return 0;
18798854e82dSTrond Myklebust }
18808854e82dSTrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
18818854e82dSTrond Myklebust 
18828854e82dSTrond Myklebust void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
18838854e82dSTrond Myklebust {
1884863d7d9cSKinglong Mee 	if (cd->pipefs) {
1885863d7d9cSKinglong Mee 		rpc_remove_cache_dir(cd->pipefs);
1886863d7d9cSKinglong Mee 		cd->pipefs = NULL;
1887863d7d9cSKinglong Mee 	}
18888854e82dSTrond Myklebust }
18898854e82dSTrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
18908854e82dSTrond Myklebust 
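/* Remove @h from @cd's hash table, if it is still hashed, so that new
 * lookups can no longer find it.  Outstanding references keep the entry
 * alive until they are dropped.
 */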
18912b477c00SNeil Brown void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
18922b477c00SNeil Brown {
18931863d77fSTrond Myklebust 	spin_lock(&cd->hash_lock);
18942b477c00SNeil Brown 	if (!hlist_unhashed(&h->cache_list)){
1895809fe3c5STrond Myklebust 		sunrpc_begin_cache_remove_entry(h, cd);
18961863d77fSTrond Myklebust 		spin_unlock(&cd->hash_lock);
1897809fe3c5STrond Myklebust 		sunrpc_end_cache_remove_entry(h, cd);
18982b477c00SNeil Brown 	} else
18991863d77fSTrond Myklebust 		spin_unlock(&cd->hash_lock);
19002b477c00SNeil Brown }
19012b477c00SNeil Brown EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1902