// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <trace/events/sunrpc.h>

#include "netns.h"
#include "fail.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static void cache_fresh_unlocked(struct cache_head *head,
				struct cache_detail *detail);

static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp))
			continue;
		tmp = cache_get_rcu(tmp);
		rcu_read_unlock();
		return tmp;
	}
	rcu_read_unlock();
	return NULL;
}

static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
					    struct cache_detail *cd)
{
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
}

static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
					  struct cache_detail *cd)
{
	cache_fresh_unlocked(ch, cd);
	cache_put(ch, cd);
}

static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list,
				 lockdep_is_held(&detail->hash_lock)) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp)) {
			sunrpc_begin_cache_remove_entry(tmp, detail);
			trace_cache_entry_expired(detail, tmp);
			freeme = tmp;
			break;
		}
		cache_get(tmp);
		spin_unlock(&detail->hash_lock);
		cache_put(new, detail);
		return tmp;
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme)
		sunrpc_end_cache_remove_entry(freeme, detail);
	return new;
}

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;
	/* Didn't find anything, insert an empty entry */
	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
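
/*
 * Example (illustrative sketch, not part of the upstream file): a cache
 * implementation typically embeds a struct cache_head in its own entry
 * type and wraps sunrpc_cache_lookup_rcu() in a typed helper.  The
 * 'my_entry', 'my_hash' and 'my_lookup' names below are hypothetical.
 *
 *	struct my_entry {
 *		struct cache_head	h;
 *		char			*key;
 *	};
 *
 *	static struct my_entry *my_lookup(struct cache_detail *cd, char *key)
 *	{
 *		struct my_entry probe = { .key = key };
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup_rcu(cd, &probe.h, my_hash(key));
 *		if (!ch)
 *			return NULL;	// allocation failure
 *		return container_of(ch, struct my_entry, h);
 *	}
 *
 * The returned entry carries a reference that must eventually be
 * dropped with cache_put().
 */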

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
			       struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

static void cache_make_negative(struct cache_detail *detail,
				struct cache_head *h)
{
	set_bit(CACHE_NEGATIVE, &h->flags);
	trace_cache_entry_make_negative(detail, h);
}

static void cache_entry_update(struct cache_detail *detail,
			       struct cache_head *h,
			       struct cache_head *new)
{
	if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
		detail->update(h, new);
		trace_cache_entry_update(detail, h);
	} else {
		cache_make_negative(detail, h);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			cache_entry_update(detail, old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	cache_entry_update(detail, tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
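
/*
 * Example (illustrative sketch): after parsing a downcall, an
 * implementation usually fills a template entry and hands it to
 * sunrpc_cache_update() to refresh or replace the existing one.
 * 'my_'-prefixed names are hypothetical.
 *
 *	struct my_entry tmp;	// filled from the downcall message
 *	struct cache_head *ch;
 *
 *	tmp.h.flags = 0;
 *	if (negative)
 *		set_bit(CACHE_NEGATIVE, &tmp.h.flags);
 *	tmp.h.expiry_time = expiry;
 *	ch = sunrpc_cache_update(cd, &tmp.h, &old->h, my_hash(key));
 *	if (!ch)
 *		return -ENOMEM;
 *	cache_put(ch, cd);
 *
 * The reference the caller holds on 'old' is always consumed; on
 * success the returned entry carries its own reference to the
 * now-valid data.
 */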

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		cache_make_negative(detail, h);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

int cache_check_rcu(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	time64_t refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC:       Want update, refage=%lld, age=%lld\n",
				refresh_age, age);
		switch (detail->cache_upcall(detail, h)) {
		case -EINVAL:
			rv = try_to_negate_entry(detail, h);
			break;
		case -EAGAIN:
			cache_fresh_unlocked(h, detail);
			break;
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}

	return rv;
}
EXPORT_SYMBOL_GPL(cache_check_rcu);

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;

	rv = cache_check_rcu(detail, h, rqstp);
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
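
/*
 * Example (illustrative sketch): a server-side caller passes the
 * request's cache_req handle and dispatches on the result; the svcauth
 * codes shown are one plausible mapping, not a rule.
 *
 *	switch (cache_check(cd, &entry->h, &rqstp->rq_chandle)) {
 *	case 0:			// valid: use entry, cache_put() when done
 *		break;
 *	case -ENOENT:		// negative entry
 *		return SVC_DENIED;
 *	case -EAGAIN:		// deferred pending an upcall
 *	default:		// -ETIMEDOUT and friends
 *		return SVC_DROP;
 *	}
 *
 * Note that cache_check() has already dropped the entry's reference on
 * every non-zero return, so only the success path owns a reference.
 */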

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * Later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'.
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->writers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded, so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			sunrpc_begin_cache_remove_entry(ch, current_detail);
			trace_cache_entry_expired(current_detail, ch);
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			sunrpc_end_cache_remove_entry(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay;

	if (list_empty(&cache_list))
		return;

	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);
	else
		delay = 5;

	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		while (!hlist_empty(head)) {
			ch = hlist_entry(head->first, struct cache_head,
					 cache_list);
			sunrpc_begin_cache_remove_entry(ch, detail);
			spin_unlock(&detail->hash_lock);
			sunrpc_end_cache_remove_entry(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
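
/*
 * Example (illustrative sketch): a request provider implements
 * cache_req->defer to package its request into a structure embedding a
 * cache_deferred_req whose ->revisit method either replays or discards
 * it.  The 'my_'-prefixed names are hypothetical; see svc_defer() and
 * svc_revisit() in net/sunrpc/svc_xprt.c for the real server-side
 * implementation of this contract.
 *
 *	struct my_deferred_req {
 *		struct cache_deferred_req	handle;
 *		// ...enough state to replay the request later...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred_req *dr =
 *			container_of(dreq, struct my_deferred_req, handle);
 *
 *		if (too_many)
 *			my_drop_request(dr);	// hypothetical helper
 *		else
 *			my_requeue_request(dr);	// hypothetical helper
 *	}
 */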

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (get_random_u32_below(2))
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
static inline bool cache_defer_immediately(void)
{
	return !fail_sunrpc.ignore_cache_wait &&
		should_fail(&fail_sunrpc.attr, 1);
}
#else
static inline bool cache_defer_immediately(void)
{
	return false;
}
#endif

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (!cache_defer_immediately()) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}

	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);
	LIST_HEAD(pending);

	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	LIST_HEAD(pending);

	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll reports readable whenever there is anything to read, and always
 * allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
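
/*
 * Example (illustrative sketch): a minimal user-space daemon loop for a
 * channel file.  make_reply() is a hypothetical handler and all error
 * handling is elided.
 *
 *	int fd = open("/proc/net/rpc/auth.unix.ip/channel", O_RDWR);
 *	char req[4096], rep[4096];
 *
 *	for (;;) {
 *		ssize_t n = read(fd, req, sizeof(req)); // one whole request
 *		if (n <= 0)
 *			continue;
 *		n = make_reply(rep, sizeof(rep), req, n);
 *		write(fd, rep, n);                      // one whole reply
 *	}
 *
 * Each read() returns exactly one request record and each write() must
 * supply one complete reply record.
 */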

static DEFINE_SPINLOCK(queue_lock);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			       struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -E2BIG;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	char *write_buf;
	ssize_t ret = -ENOMEM;

	if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
		ret = -EINVAL;
		goto out;
	}

	write_buf = kvmalloc(count + 1, GFP_KERNEL);
	if (!write_buf)
		goto out;

	ret = cache_do_downcall(write_buf, buf, count, cd);
	kvfree(write_buf);
out:
	return ret;
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;

		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	if (filp->f_mode & FMODE_WRITE)
		atomic_inc(&cd->writers);
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);
	}
	if (filp->f_mode & FMODE_WRITE) {
		atomic_dec(&cd->writers);
		cd->last_close = seconds_since_boot();
	}
	module_put(cd->owner);
	return 0;
}



static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	LIST_HEAD(dequeued);

	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Each field is either mangled, with space, tab, newline and slosh
 * quoted by a slosh, or hexified with a leading \x.
 * A record is terminated with a newline.
 */
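
/*
 * Example (illustrative): qword_add() would emit the field "host one"
 * as "host\040one" (the space octal-escaped), and qword_addhex() would
 * emit the three raw bytes 0x01 0x02 0x03 as "\x010203".  qword_get()
 * below reverses either encoding, one field at a time.
 */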

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0)
		return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->writers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		 return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
		trace_cache_entry_upcall(detail, h);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}

int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	if (test_and_set_bit(CACHE_PENDING, &h->flags))
		return 0;
	return cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
				     struct cache_head *h)
{
	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		trace_cache_entry_no_listener(detail, h);
		return -EINVAL;
	}
	return sunrpc_cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ')
		bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ')
		bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
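
/*
 * Example (illustrative sketch): a cache_parse implementation walks the
 * downcall message with qword_get(), one dequoted field at a time.
 * 'my_parse' is a hypothetical handler.
 *
 *	static int my_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char word[128];
 *
 *		if (mlen == 0 || mesg[mlen - 1] != '\n')
 *			return -EINVAL;
 *		while (qword_get(&mesg, word, sizeof(word)) > 0) {
 *			// handle one field: key, expiry, content, ...
 *		}
 *		return 0;
 *	}
 */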

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
					hlist_next_rcu(&ch->cache_list)),
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);

	if (cache_check_rcu(cd, cp, NULL))
		seq_puts(m, "# ");
	else if (cache_is_expired(cd, cp))
		seq_puts(m, "# ");

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
		struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
			convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time64_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	if (cd->flush)
		cd->flush();

	*ppos += count;
	return count;
}
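
/*
 * Example (illustrative): because write_flush() validates the number but
 * otherwise ignores it, flushing a cache from user space is simply
 *
 *	echo 1 > /proc/net/rpc/auth.unix.ip/flush
 *
 * which behaves like cache_purge() for that cache.
 */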

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = pde_data(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return cache_release(inode, filp, cd);
}

static const struct proc_ops cache_channel_proc_ops = {
	.proc_read	= cache_read_procfs,
	.proc_write	= cache_write_procfs,
	.proc_poll	= cache_poll_procfs,
	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.proc_open	= cache_open_procfs,
	.proc_release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return content_release(inode, filp, cd);
}

static const struct proc_ops content_proc_ops = {
	.proc_open	= content_open_procfs,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = pde_data(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = pde_data(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct proc_ops cache_flush_proc_ops = {
	.proc_open	= open_flush_procfs,
	.proc_read	= read_flush_procfs,
	.proc_write	= write_flush_procfs,
	.proc_release	= release_flush_procfs,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}

static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	if (!IS_ENABLED(CONFIG_PROC_FS))
		return 0;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_proc_ops, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_channel_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);
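
/*
 * Example (illustrative sketch): per-net caches are typically created
 * from a static template and then registered.  The 'my_'-prefixed names
 * are hypothetical.
 *
 *	static const struct cache_detail my_cache_template = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= 64,
 *		.name		= "my.cache",
 *		.cache_upcall	= my_upcall,
 *		.cache_parse	= my_parse,
 *		.match		= my_match,
 *		.init		= my_init,
 *		.update		= my_update,
 *		.alloc		= my_alloc,
 *	};
 *
 *	struct cache_detail *cd = cache_create_net(&my_cache_template, net);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err) {
 *		cache_destroy_net(cd, net);
 *		return err;
 *	}
 *
 * Teardown is the reverse: cache_unregister_net(cd, net), then
 * cache_destroy_net(cd, net).
 */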

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);

	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);

void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		sunrpc_begin_cache_remove_entry(h, cd);
		spin_unlock(&cd->hash_lock);
		sunrpc_end_cache_remove_entry(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1914