// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/hex.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <trace/events/sunrpc.h>

#include "netns.h"
#include "fail.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
        time64_t now = seconds_since_boot();
        INIT_HLIST_NODE(&h->cache_list);
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        if (now <= detail->flush_time)
                /* ensure it isn't already expired */
                now = detail->flush_time + 1;
        h->last_refresh = now;
}

static void cache_fresh_unlocked(struct cache_head *head,
                                 struct cache_detail *detail);

static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
                                                struct cache_head *key,
                                                int hash)
{
        struct hlist_head *head = &detail->hash_table[hash];
        struct cache_head *tmp;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp, head, cache_list) {
                if (!detail->match(tmp, key))
                        continue;
                if (test_bit(CACHE_VALID, &tmp->flags) &&
                    cache_is_expired(detail, tmp))
                        continue;
                tmp = cache_get_rcu(tmp);
                rcu_read_unlock();
                return tmp;
        }
        rcu_read_unlock();
        return NULL;
}

static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
                                            struct cache_detail *cd)
{
        /* Must be called under cd->hash_lock */
        hlist_del_init_rcu(&ch->cache_list);
        set_bit(CACHE_CLEANED, &ch->flags);
        cd->entries--;
}

static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
                                          struct cache_detail *cd)
{
        cache_fresh_unlocked(ch, cd);
        cache_put(ch, cd);
}

static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
                                                 struct cache_head *key,
                                                 int hash)
{
        struct cache_head *new, *tmp, *freeme = NULL;
        struct hlist_head *head = &detail->hash_table[hash];

        new = detail->alloc();
        if (!new)
                return NULL;
        /* must fully initialise 'new', else
         * we might lose it if we need to
         * cache_put it soon.
         */
        cache_init(new, detail);
        detail->init(new, key);

        spin_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
        hlist_for_each_entry_rcu(tmp, head, cache_list,
                                 lockdep_is_held(&detail->hash_lock)) {
                if (!detail->match(tmp, key))
                        continue;
                if (test_bit(CACHE_VALID, &tmp->flags) &&
                    cache_is_expired(detail, tmp)) {
                        sunrpc_begin_cache_remove_entry(tmp, detail);
                        trace_cache_entry_expired(detail, tmp);
                        freeme = tmp;
                        break;
                }
                cache_get(tmp);
                spin_unlock(&detail->hash_lock);
                cache_put(new, detail);
                return tmp;
        }

        cache_get(new);
        hlist_add_head_rcu(&new->cache_list, head);
        detail->entries++;
        if (detail->nextcheck > new->expiry_time)
                detail->nextcheck = new->expiry_time + 1;
        spin_unlock(&detail->hash_lock);

        if (freeme)
                sunrpc_end_cache_remove_entry(freeme, detail);
        return new;
}

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
                                           struct cache_head *key, int hash)
{
        struct cache_head *ret;

        ret = sunrpc_cache_find_rcu(detail, key, hash);
        if (ret)
                return ret;
        /* Didn't find anything, insert an empty entry */
        return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
                               struct cache_detail *detail)
{
        time64_t now = seconds_since_boot();
        if (now <= detail->flush_time)
                /* ensure it isn't immediately treated as expired */
                now = detail->flush_time + 1;
        head->expiry_time = expiry;
        head->last_refresh = now;
        smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
        set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
                                 struct cache_detail *detail)
{
        if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
                cache_revisit_request(head);
                cache_dequeue(detail, head);
        }
}

static void cache_make_negative(struct cache_detail *detail,
                                struct cache_head *h)
{
        set_bit(CACHE_NEGATIVE, &h->flags);
        trace_cache_entry_make_negative(detail, h);
}

static void cache_entry_update(struct cache_detail *detail,
                               struct cache_head *h,
                               struct cache_head *new)
{
        if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
                detail->update(h, new);
                trace_cache_entry_update(detail, h);
        } else {
                cache_make_negative(detail, h);
        }
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
                                       struct cache_head *new, struct cache_head *old, int hash)
{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it
         */
        struct cache_head *tmp;

        if (!test_bit(CACHE_VALID, &old->flags)) {
                spin_lock(&detail->hash_lock);
                if (!test_bit(CACHE_VALID, &old->flags)) {
                        cache_entry_update(detail, old, new);
                        cache_fresh_locked(old, new->expiry_time, detail);
                        spin_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(old, detail);
                        return old;
                }
                spin_unlock(&detail->hash_lock);
        }
        /* We need to insert a new entry */
        tmp = detail->alloc();
        if (!tmp) {
                cache_put(old, detail);
                return NULL;
        }
        cache_init(tmp, detail);
        detail->init(tmp, old);

        spin_lock(&detail->hash_lock);
        cache_entry_update(detail, tmp, new);
        cache_get(tmp);
        hlist_add_head_rcu(&tmp->cache_list, &detail->hash_table[hash]);
        detail->entries++;
        cache_fresh_locked(tmp, new->expiry_time, detail);
        cache_fresh_locked(old, 0, detail);
        spin_unlock(&detail->hash_lock);
        cache_fresh_unlocked(tmp, detail);
        cache_fresh_unlocked(old, detail);
        cache_put(old, detail);
        return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static inline int cache_is_valid(struct cache_head *h)
{
        if (!test_bit(CACHE_VALID, &h->flags))
                return -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        return -ENOENT;
                else {
                        /*
                         * In combination with write barrier in
                         * sunrpc_cache_update, ensures that anyone
                         * using the cache entry after this sees the
                         * updated contents:
                         */
                        smp_rmb();
                        return 0;
                }
        }
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
        int rv;

        spin_lock(&detail->hash_lock);
        rv = cache_is_valid(h);
        if (rv == -EAGAIN) {
                cache_make_negative(detail, h);
                cache_fresh_locked(h, seconds_since_boot() + CACHE_NEW_EXPIRY,
                                   detail);
                rv = -ENOENT;
        }
        spin_unlock(&detail->hash_lock);
        cache_fresh_unlocked(h, detail);
        return rv;
}

int cache_check_rcu(struct cache_detail *detail,
                    struct cache_head *h, struct cache_req *rqstp)
{
        int rv;
        time64_t refresh_age, age;

        /* First decide return status as best we can */
        rv = cache_is_valid(h);

        /* now see if we want to start an upcall */
        refresh_age = (h->expiry_time - h->last_refresh);
        age = seconds_since_boot() - h->last_refresh;

        if (rqstp == NULL) {
                if (rv == -EAGAIN)
                        rv = -ENOENT;
        } else if (rv == -EAGAIN ||
                   (h->expiry_time != 0 && age > refresh_age / 2)) {
                dprintk("RPC: Want update, refage=%lld, age=%lld\n",
                        refresh_age, age);
                switch (detail->cache_upcall(detail, h)) {
                case -EINVAL:
                        rv = try_to_negate_entry(detail, h);
                        break;
                case -EAGAIN:
                        cache_fresh_unlocked(h, detail);
                        break;
                }
        }

        if (rv == -EAGAIN) {
                if (!cache_defer_req(rqstp, h)) {
                        /*
                         * Request was not deferred; handle it as best
                         * we can ourselves:
                         */
                        rv = cache_is_valid(h);
                        if (rv == -EAGAIN)
                                rv = -ETIMEDOUT;
                }
        }

        return rv;
}
EXPORT_SYMBOL_GPL(cache_check_rcu);

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *      -EAGAIN if upcall is pending and request has been queued
 *      -ETIMEDOUT if upcall failed or request could not be queued or
 *                 upcall completed but item is still invalid (implying that
 *                 the cache item has been replaced with a newer one).
 *      -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
                struct cache_head *h, struct cache_req *rqstp)
{
        int rv;

        rv = cache_check_rcu(detail, h, rqstp);
        if (rv)
                cache_put(h, detail);
        return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
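
/*
 * Illustrative caller pattern (a sketch, not a verbatim call site): a
 * typical user such as an svcauth flavour looks an item up and then
 * validates it:
 *
 *      h = sunrpc_cache_lookup_rcu(cd, &key.h, hash);
 *      if (!h)
 *              return -ENOMEM;
 *      switch (cache_check(cd, h, &rqstp->rq_chandle)) {
 *      case 0:          // entry valid: use it, then cache_put() when done
 *      case -EAGAIN:    // upcall pending, request was deferred
 *      case -ENOENT:    // negative entry
 *      case -ETIMEDOUT: // upcall failed or could not be queued
 *      }
 *
 * On any non-zero return, cache_check() has already dropped the reference
 * taken by the lookup.
 */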

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */

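/*
 * Worked example, derived from cache_clean() below (times are
 * seconds_since_boot() values): a scan of a table starting at t=1000
 * first pushes nextcheck out to t=1000+30*60.  If the scan then passes
 * entries expiring at t=1030 and t=1050, nextcheck ends up at 1031 (the
 * earliest expiry plus one), and the table is not scanned again until
 * seconds_since_boot() >= 1031.
 */
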
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
        spin_lock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->requests);
        INIT_LIST_HEAD(&cd->readers);
        spin_lock_init(&cd->queue_lock);
        init_waitqueue_head(&cd->queue_wait);
        cd->next_seqno = 0;
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
        atomic_set(&cd->writers, 0);
        cd->last_close = 0;
        cd->last_warn = -1;
        list_add(&cd->others, &cache_list);
        spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
        cache_purge(cd);
        spin_lock(&cache_list_lock);
        spin_lock(&cd->hash_lock);
        if (current_detail == cd)
                current_detail = NULL;
        list_del_init(&cd->others);
        spin_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        if (list_empty(&cache_list)) {
                /* module must be being unloaded so it's safe to kill the worker */
                cancel_delayed_work_sync(&cache_cleaner);
        }
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
        int rv = 0;
        struct list_head *next;

        spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
        while (current_detail == NULL ||
               current_index >= current_detail->hash_size) {
                if (current_detail)
                        next = current_detail->others.next;
                else
                        next = cache_list.next;
                if (next == &cache_list) {
                        current_detail = NULL;
                        spin_unlock(&cache_list_lock);
                        return -1;
                }
                current_detail = list_entry(next, struct cache_detail, others);
                if (current_detail->nextcheck > seconds_since_boot())
                        current_index = current_detail->hash_size;
                else {
                        current_index = 0;
                        current_detail->nextcheck = seconds_since_boot() + 30*60;
                }
        }

        spin_lock(&current_detail->hash_lock);

        /* find a non-empty bucket in the table */
        while (current_index < current_detail->hash_size &&
               hlist_empty(&current_detail->hash_table[current_index]))
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */
        if (current_index < current_detail->hash_size) {
                struct cache_head *ch = NULL;
                struct cache_detail *d;
                struct hlist_head *head;
                struct hlist_node *tmp;

                /* Ok, now to clean this strand */
                head = &current_detail->hash_table[current_index];
                hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
                        if (current_detail->nextcheck > ch->expiry_time)
                                current_detail->nextcheck = ch->expiry_time + 1;
                        if (!cache_is_expired(current_detail, ch))
                                continue;

                        sunrpc_begin_cache_remove_entry(ch, current_detail);
                        trace_cache_entry_expired(current_detail, ch);
                        rv = 1;
                        break;
                }

                spin_unlock(&current_detail->hash_lock);
                d = current_detail;
                if (!ch)
                        current_index++;
                spin_unlock(&cache_list_lock);
                if (ch)
                        sunrpc_end_cache_remove_entry(ch, d);
        } else {
                spin_unlock(&current_detail->hash_lock);
                spin_unlock(&cache_list_lock);
        }

        return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
        int delay;

        if (list_empty(&cache_list))
                return;

        if (cache_clean() == -1)
                delay = round_jiffies_relative(30*HZ);
        else
                delay = 5;

        queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.  Two full passes are made because the scan state
 * (current_detail/current_index) is global: the first pass may resume
 * a scan that was already part-way through the list, so only the
 * second pass is guaranteed to have covered everything.
 */
void cache_flush(void)
{
        while (cache_clean() != -1)
                cond_resched();
        while (cache_clean() != -1)
                cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
        struct cache_head *ch = NULL;
        struct hlist_head *head = NULL;
        int i = 0;

        spin_lock(&detail->hash_lock);
        if (!detail->entries) {
                spin_unlock(&detail->hash_lock);
                return;
        }

        dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
        for (i = 0; i < detail->hash_size; i++) {
                head = &detail->hash_table[i];
                while (!hlist_empty(head)) {
                        ch = hlist_entry(head->first, struct cache_head,
                                         cache_list);
                        sunrpc_begin_cache_remove_entry(ch, detail);
                        spin_unlock(&detail->hash_lock);
                        sunrpc_end_cache_remove_entry(ch, detail);
                        spin_lock(&detail->hash_lock);
                }
        }
        spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
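
/*
 * An illustrative deferred form (a sketch only; compare the on-stack
 * thread_deferred_req below): the owner embeds the handle in its own
 * structure and recovers it in the ->revisit callback.  The names
 * my_deferred_req/my_revisit are hypothetical:
 *
 *      struct my_deferred_req {
 *              struct cache_deferred_req handle;
 *              ... enough state to replay the request ...
 *      };
 *
 *      static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *      {
 *              struct my_deferred_req *dr =
 *                      container_of(dreq, struct my_deferred_req, handle);
 *              ... requeue 'dr', or discard it if too_many ...
 *      }
 */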

#define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300 /* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
        hlist_del_init(&dreq->hash);
        if (!list_empty(&dreq->recent)) {
                list_del_init(&dreq->recent);
                cache_defer_cnt--;
        }
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&dreq->recent);
        hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
                           struct cache_head *item,
                           int count_me)
{
        dreq->item = item;

        spin_lock(&cache_defer_lock);

        __hash_deferred_req(dreq, item);

        if (count_me) {
                cache_defer_cnt++;
                list_add(&dreq->recent, &cache_defer_list);
        }

        spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
        struct cache_deferred_req handle;
        struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
        struct thread_deferred_req *dr =
                container_of(dreq, struct thread_deferred_req, handle);
        complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
        struct thread_deferred_req sleeper;
        struct cache_deferred_req *dreq = &sleeper.handle;

        sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
        dreq->revisit = cache_restart_thread;

        setup_deferral(dreq, item, 0);

        if (!test_bit(CACHE_PENDING, &item->flags) ||
            wait_for_completion_interruptible_timeout(
                    &sleeper.completion, req->thread_wait) <= 0) {
                /* The completion wasn't completed, so we need
                 * to clean up
                 */
                spin_lock(&cache_defer_lock);
                if (!hlist_unhashed(&sleeper.handle.hash)) {
                        __unhash_deferred_req(&sleeper.handle);
                        spin_unlock(&cache_defer_lock);
                } else {
                        /* cache_revisit_request already removed
                         * this from the hash table, but hasn't
                         * called ->revisit yet.  It will very soon
                         * and we need to wait for it.
                         */
                        spin_unlock(&cache_defer_lock);
                        wait_for_completion(&sleeper.completion);
                }
        }
}

static void cache_limit_defers(void)
{
        /* Make sure we haven't exceeded the limit of allowed deferred
         * requests.
         */
        struct cache_deferred_req *discard = NULL;

        if (cache_defer_cnt <= DFR_MAX)
                return;

        spin_lock(&cache_defer_lock);

        /* Consider removing either the first or the last */
        if (cache_defer_cnt > DFR_MAX) {
                if (get_random_u32_below(2))
                        discard = list_entry(cache_defer_list.next,
                                             struct cache_deferred_req, recent);
                else
                        discard = list_entry(cache_defer_list.prev,
                                             struct cache_deferred_req, recent);
                __unhash_deferred_req(discard);
        }
        spin_unlock(&cache_defer_lock);
        if (discard)
                discard->revisit(discard, 1);
}

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
static inline bool cache_defer_immediately(void)
{
        return !fail_sunrpc.ignore_cache_wait &&
                should_fail(&fail_sunrpc.attr, 1);
}
#else
static inline bool cache_defer_immediately(void)
{
        return false;
}
#endif

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
        struct cache_deferred_req *dreq;

        if (!cache_defer_immediately()) {
                cache_wait_req(req, item);
                if (!test_bit(CACHE_PENDING, &item->flags))
                        return false;
        }

        dreq = req->defer(req);
        if (dreq == NULL)
                return false;
        setup_deferral(dreq, item, 1);
        if (!test_bit(CACHE_PENDING, &item->flags))
                /* Bit could have been cleared before we managed to
                 * set up the deferral, so need to revisit just in case
                 */
                cache_revisit_request(item);

        cache_limit_defers();
        return true;
}

static void cache_revisit_request(struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        struct hlist_node *tmp;
        int hash = DFR_HASH(item);
        LIST_HEAD(pending);

        spin_lock(&cache_defer_lock);

        hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
                if (dreq->item == item) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }

        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 0);
        }
}

void cache_clean_deferred(void *owner)
{
        struct cache_deferred_req *dreq, *tmp;
        LIST_HEAD(pending);

        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 1);
        }
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 */
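
/*
 * For example (illustrative values only), with the auth.unix.ip cache a
 * read from the channel might return the request line
 *
 *      nfsd 10.0.0.1
 *
 * and the daemon would answer by writing back something like
 *
 *      nfsd 10.0.0.1 1234567 mydomain
 *
 * i.e. class, address, expiry time and content, in the quoted-field
 * format handled by qword_add()/qword_get() below.
 */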

struct cache_request {
        struct list_head        list;
        struct cache_head       *item;
        char                    *buf;
        int                     len;
        int                     readers;
        u64                     seqno;
};
struct cache_reader {
        struct list_head        list;
        int                     offset; /* if non-0, we have a refcnt on next request */
        u64                     next_seqno;
};

static int cache_request(struct cache_detail *detail,
                         struct cache_request *crq)
{
        char *bp = crq->buf;
        int len = PAGE_SIZE;

        detail->cache_request(detail, crq->item, &bp, &len);
        if (len < 0)
                return -E2BIG;
        return PAGE_SIZE - len;
}

static struct cache_request *
cache_next_request(struct cache_detail *cd, u64 seqno)
{
        struct cache_request *rq;

        list_for_each_entry(rq, &cd->requests, list)
                if (rq->seqno >= seqno)
                        return rq;
        return NULL;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *ppos, struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
        struct inode *inode = file_inode(filp);
        int err;

        if (count == 0)
                return 0;

        inode_lock(inode); /* protect against multiple concurrent
                            * readers on this file */
again:
        spin_lock(&cd->queue_lock);
        /* need to find next request */
        rq = cache_next_request(cd, rp->next_seqno);
        if (!rq) {
                spin_unlock(&cd->queue_lock);
                inode_unlock(inode);
                WARN_ON_ONCE(rp->offset);
                return 0;
        }
        if (rp->offset == 0)
                rq->readers++;
        spin_unlock(&cd->queue_lock);

        if (rq->len == 0) {
                err = cache_request(cd, rq);
                if (err < 0)
                        goto out;
                rq->len = err;
        }

        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                rp->next_seqno = rq->seqno + 1;
        } else {
                if (rp->offset + count > rq->len)
                        count = rq->len - rp->offset;
                err = -EFAULT;
                if (copy_to_user(buf, rq->buf + rp->offset, count))
                        goto out;
                rp->offset += count;
                if (rp->offset >= rq->len) {
                        rp->offset = 0;
                        rp->next_seqno = rq->seqno + 1;
                }
                err = 0;
        }
out:
        if (rp->offset == 0) {
                /* need to release rq */
                spin_lock(&cd->queue_lock);
                rq->readers--;
                if (rq->readers == 0 &&
                    !test_bit(CACHE_PENDING, &rq->item->flags)) {
                        list_del(&rq->list);
                        spin_unlock(&cd->queue_lock);
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                } else
                        spin_unlock(&cd->queue_lock);
        }
        if (err == -EAGAIN)
                goto again;
        inode_unlock(inode);
        return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
                                 size_t count, struct cache_detail *cd)
{
        ssize_t ret;

        if (count == 0)
                return -EINVAL;
        if (copy_from_user(kaddr, buf, count))
                return -EFAULT;
        kaddr[count] = '\0';
        ret = cd->cache_parse(cd, kaddr, count);
        if (!ret)
                ret = count;
        return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
{
        char *write_buf;
        ssize_t ret = -ENOMEM;

        if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
                ret = -EINVAL;
                goto out;
        }

        write_buf = kvmalloc(count + 1, GFP_KERNEL);
        if (!write_buf)
                goto out;

        ret = cache_do_downcall(write_buf, buf, count, cd);
        kvfree(write_buf);
out:
        return ret;
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = file_inode(filp);
        ssize_t ret = -EINVAL;

        if (!cd->cache_parse)
                goto out;

        inode_lock(inode);
        ret = cache_downcall(mapping, buf, count, cd);
        inode_unlock(inode);
out:
        return ret;
}

static __poll_t cache_poll(struct file *filp, poll_table *wait,
                           struct cache_detail *cd)
{
        __poll_t mask;
        struct cache_reader *rp = filp->private_data;

        poll_wait(filp, &cd->queue_wait, wait);

        /* always allow write */
        mask = EPOLLOUT | EPOLLWRNORM;

        if (!rp)
                return mask;

        spin_lock(&cd->queue_lock);

        if (cache_next_request(cd, rp->next_seqno))
                mask |= EPOLLIN | EPOLLRDNORM;
        spin_unlock(&cd->queue_lock);
        return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
                       unsigned int cmd, unsigned long arg,
                       struct cache_detail *cd)
{
        int len = 0;
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;

        if (cmd != FIONREAD || !rp)
                return -EINVAL;

        spin_lock(&cd->queue_lock);

        /* only find the length remaining in current request,
         * or the length of the next request
         */
        rq = cache_next_request(cd, rp->next_seqno);
        if (rq)
                len = rq->len - rp->offset;
        spin_unlock(&cd->queue_lock);

        return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
                      struct cache_detail *cd)
{
        struct cache_reader *rp = NULL;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        nonseekable_open(inode, filp);
        if (filp->f_mode & FMODE_READ) {
                rp = kmalloc_obj(*rp);
                if (!rp) {
                        module_put(cd->owner);
                        return -ENOMEM;
                }
                rp->offset = 0;
                rp->next_seqno = 0;

                spin_lock(&cd->queue_lock);
                list_add(&rp->list, &cd->readers);
                spin_unlock(&cd->queue_lock);
        }
        if (filp->f_mode & FMODE_WRITE)
                atomic_inc(&cd->writers);
        filp->private_data = rp;
        return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
                         struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;

        if (rp) {
                struct cache_request *rq = NULL;

                spin_lock(&cd->queue_lock);
                if (rp->offset) {
                        struct cache_request *cr;

                        cr = cache_next_request(cd, rp->next_seqno);
                        if (cr) {
                                cr->readers--;
                                if (cr->readers == 0 &&
                                    !test_bit(CACHE_PENDING,
                                              &cr->item->flags)) {
                                        list_del(&cr->list);
                                        rq = cr;
                                }
                        }
                        rp->offset = 0;
                }
                list_del(&rp->list);
                spin_unlock(&cd->queue_lock);

                if (rq) {
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                }

                filp->private_data = NULL;
                kfree(rp);
        }
        if (filp->f_mode & FMODE_WRITE) {
                atomic_dec(&cd->writers);
                cd->last_close = seconds_since_boot();
        }
        module_put(cd->owner);
        return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
        struct cache_request *cr, *tmp;
        LIST_HEAD(dequeued);

        spin_lock(&detail->queue_lock);
        list_for_each_entry_safe(cr, tmp, &detail->requests, list) {
                if (cr->item != ch)
                        continue;
                if (test_bit(CACHE_PENDING, &ch->flags))
                        /* Lost a race and it is pending again */
                        break;
                if (cr->readers != 0)
                        continue;
                list_move(&cr->list, &dequeued);
        }
        spin_unlock(&detail->queue_lock);
        while (!list_empty(&dequeued)) {
                cr = list_entry(dequeued.next, struct cache_request, list);
                list_del(&cr->list);
                cache_put(cr->item, detail);
                kfree(cr->buf);
                kfree(cr);
        }
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */
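
/*
 * For example (a sketch of the encoding, not the output of a real
 * upcall): qword_add() turns "my domain" into "my\040domain " (the
 * space is octal-escaped and a field separator is appended), while
 * qword_addhex() turns the two bytes 0xde 0xad into "\xdead ".
 */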

void qword_add(char **bpp, int *lp, char *str)
{
        char *bp = *bpp;
        int len = *lp;
        int ret;

        if (len < 0)
                return;

        ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
        if (ret >= len) {
                bp += len;
                len = -1;
        } else {
                bp += ret;
                len -= ret;
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
        char *bp = *bpp;
        int len = *lp;

        if (len < 0)
                return;

        if (len > 2) {
                *bp++ = '\\';
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
                        bp = hex_byte_pack(bp, *buf++);
                        len -= 2;
                        blen--;
                }
        }
        if (blen || len < 1)
                len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
        if (detail->last_warn != detail->last_close) {
                detail->last_warn = detail->last_close;
                if (detail->warn_no_listener)
                        detail->warn_no_listener(detail, detail->last_close != 0);
        }
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
        if (atomic_read(&detail->writers))
                return true;
        if (detail->last_close == 0)
                /* This cache was never opened */
                return false;
        if (detail->last_close < seconds_since_boot() - 30)
                /*
                 * We allow for the possibility that someone might
                 * restart a userspace daemon without restarting the
                 * server; but after 30 seconds, we give up.
                 */
                return false;
        return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
        char *buf;
        struct cache_request *crq;
        int ret = 0;

        if (test_bit(CACHE_CLEANED, &h->flags))
                /* Too late to make an upcall */
                return -EAGAIN;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -EAGAIN;

        crq = kmalloc_obj(*crq);
        if (!crq) {
                kfree(buf);
                return -EAGAIN;
        }

        crq->buf = buf;
        crq->len = 0;
        crq->readers = 0;
        spin_lock(&detail->queue_lock);
        if (test_bit(CACHE_PENDING, &h->flags)) {
                crq->item = cache_get(h);
                crq->seqno = detail->next_seqno++;
                list_add_tail(&crq->list, &detail->requests);
                trace_cache_entry_upcall(detail, h);
        } else
                /* Lost a race, no longer PENDING, so don't enqueue */
                ret = -EAGAIN;
        spin_unlock(&detail->queue_lock);
        wake_up(&detail->queue_wait);
        if (ret == -EAGAIN) {
                kfree(buf);
                kfree(crq);
        }
        return ret;
}

int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
        if (test_and_set_bit(CACHE_PENDING, &h->flags))
                return 0;
        return cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
                                     struct cache_head *h)
{
        if (!cache_listeners_exist(detail)) {
                warn_no_listener(detail);
                trace_cache_entry_no_listener(detail, h);
                return -EINVAL;
        }
        return sunrpc_cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *      reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

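/*
 * A typical ->cache_parse implementation walks the message with
 * qword_get() (a sketch only; the field name and error handling are
 * illustrative):
 *
 *      char class[8];
 *
 *      if (qword_get(&mesg, class, sizeof(class)) <= 0)
 *              return -EINVAL;
 *      ... then the expiry, key and content fields in the same way ...
 */
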
int qword_get(char **bpp, char *dest, int bufsize)
{
        /* return bytes copied, or -1 on error */
        char *bp = *bpp;
        int len = 0;

        while (*bp == ' ')
                bp++;

        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
                while (len < bufsize - 1) {
                        int h, l;

                        h = hex_to_bin(bp[0]);
                        if (h < 0)
                                break;

                        l = hex_to_bin(bp[1]);
                        if (l < 0)
                                break;

                        *dest++ = (h << 4) | l;
                        bp += 2;
                        len++;
                }
        } else {
                /* text with \nnn octal quoting */
                while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize - 1) {
                        if (*bp == '\\' &&
                            isodigit(bp[1]) && (bp[1] <= '3') &&
                            isodigit(bp[2]) &&
                            isodigit(bp[3])) {
                                int byte = (*++bp - '0');

                                bp++;
                                byte = (byte << 3) | (*bp++ - '0');
                                byte = (byte << 3) | (*bp++ - '0');
                                *dest++ = byte;
                                len++;
                        } else {
                                *dest++ = *bp++;
                                len++;
                        }
                }
        }

        if (*bp != ' ' && *bp != '\n' && *bp != '\0')
                return -1;
        while (*bp == ' ')
                bp++;
        *bpp = bp;
        *dest = '\0';
        return len;
}
EXPORT_SYMBOL_GPL(qword_get);

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

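/*
 * The file therefore starts with the header line produced for the NULL
 * item, followed by one line per entry; entries that fail
 * cache_check_rcu() or are expired get a leading "# " so they read as
 * comments.  For instance (illustrative values),
 * /proc/net/rpc/auth.unix.ip/content might show:
 *
 *      #class IP domain
 *      nfsd 10.0.0.1 mydomain
 */
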
static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;
        unsigned int hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = m->private;

        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
        entry = n & ((1LL<<32) - 1);

        hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
                if (!entry--)
                        return ch;
        ch = NULL;
        while (!ch && ++hash < cd->hash_size)
                ch = hlist_entry_safe(rcu_dereference(
                                hlist_first_rcu(&cd->hash_table[hash])),
                                struct cache_head, cache_list);

        *pos = ((long long)hash << 32) + 1;
        return ch;
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
        struct cache_detail *cd = m->private;

        if (p == SEQ_START_TOKEN) {
                hash = 0;
                ch = NULL;
        }
        while (hash < cd->hash_size) {
                if (ch)
                        ch = hlist_entry_safe(
                                rcu_dereference(
                                        hlist_next_rcu(&ch->cache_list)),
                                struct cache_head, cache_list);
                else
                        ch = hlist_entry_safe(
                                rcu_dereference(
                                        hlist_first_rcu(&cd->hash_table[hash])),
                                struct cache_head, cache_list);
                if (ch) {
                        ++*pos;
                        return ch;
                }
                hash++;
                *pos = (long long)hash << 32;
        }
        return NULL;
}

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
        return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
        __releases(RCU)
{
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
        struct cache_head *cp = p;
        struct cache_detail *cd = m->private;

        if (p == SEQ_START_TOKEN)
                return cd->cache_show(m, cd, NULL);

        ifdebug(CACHE)
                seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
                           convert_to_wallclock(cp->expiry_time),
                           kref_read(&cp->ref), cp->flags);

        if (cache_check_rcu(cd, cp, NULL))
                seq_puts(m, "# ");
        else if (cache_is_expired(cd, cp))
                seq_puts(m, "# ");

        return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
        .start  = cache_seq_start_rcu,
        .next   = cache_seq_next_rcu,
        .stop   = cache_seq_stop_rcu,
        .show   = c_show,
};

static int content_open(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        struct seq_file *seq;
        int err;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;

        err = seq_open(file, &cache_content_op);
        if (err) {
                module_put(cd->owner);
                return err;
        }

        seq = file->private_data;
        seq->private = cd;
        return 0;
}

static int content_release(struct inode *inode, struct file *file,
                           struct cache_detail *cd)
{
        int ret = seq_release(inode, file);

        module_put(cd->owner);
        return ret;
}

static int open_flush(struct inode *inode, struct file *file,
                      struct cache_detail *cd)
{
        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
                         struct cache_detail *cd)
{
        module_put(cd->owner);
        return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos,
                          struct cache_detail *cd)
{
        char tbuf[22];
        size_t len;

        len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
                       convert_to_wallclock(cd->flush_time));
        return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

static ssize_t write_flush(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        char tbuf[20];
        char *ep;
        time64_t now;

        if (*ppos || count > sizeof(tbuf) - 1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        simple_strtoul(tbuf, &ep, 0);
        if (*ep && *ep != '\n')
                return -EINVAL;
        /* Note that while we check that 'buf' holds a valid number,
         * we always ignore the value and just flush everything.
         * Making use of the number leads to races.
         */

        now = seconds_since_boot();
        /* Always flush everything, so behave like cache_purge()
         * Do this by advancing flush_time to the current time,
         * or by one second if it has already reached the current time.
         * Newly added cache entries will always have ->last_refresh greater
         * than ->flush_time, so they don't get flushed prematurely.
         */

        if (cd->flush_time >= now)
                now = cd->flush_time + 1;

        cd->flush_time = now;
        cd->nextcheck = now;
        cache_flush();

        if (cd->flush)
                cd->flush();

        *ppos += count;
        return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = pde_data(file_inode(filp));

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = pde_data(file_inode(filp));

        return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = pde_data(file_inode(filp));

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct cache_detail *cd = pde_data(inode);

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = pde_data(inode);

        return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = pde_data(inode);

        return cache_release(inode, filp, cd);
}

static const struct proc_ops cache_channel_proc_ops = {
        .proc_read      = cache_read_procfs,
        .proc_write     = cache_write_procfs,
        .proc_poll      = cache_poll_procfs,
        .proc_ioctl     = cache_ioctl_procfs, /* for FIONREAD */
        .proc_open      = cache_open_procfs,
        .proc_release   = cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = pde_data(inode);

        return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = pde_data(inode);

        return content_release(inode, filp, cd);
}

static const struct proc_ops content_proc_ops = {
        .proc_open      = content_open_procfs,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = pde_data(inode);

        return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = pde_data(inode);

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = pde_data(file_inode(filp));

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = pde_data(file_inode(filp));

        return write_flush(filp, buf, count, ppos, cd);
}

static const struct proc_ops cache_flush_proc_ops = {
        .proc_open      = open_flush_procfs,
        .proc_read      = read_flush_procfs,
        .proc_write     = write_flush_procfs,
        .proc_release   = release_flush_procfs,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
        if (cd->procfs) {
                proc_remove(cd->procfs);
                cd->procfs = NULL;
        }
}

static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        struct proc_dir_entry *p;
        struct sunrpc_net *sn;

        if (!IS_ENABLED(CONFIG_PROC_FS))
                return 0;

        sn = net_generic(net, sunrpc_net_id);
        cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
        if (cd->procfs == NULL)
                goto out_nomem;

        p = proc_create_data("flush", S_IFREG | 0600,
                             cd->procfs, &cache_flush_proc_ops, cd);
        if (p == NULL)
                goto out_nomem;

        if (cd->cache_request || cd->cache_parse) {
                p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
                                     &cache_channel_proc_ops, cd);
                if (p == NULL)
                        goto out_nomem;
        }
        if (cd->cache_show) {
                p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
                                     &content_proc_ops, cd);
                if (p == NULL)
                        goto out_nomem;
        }
        return 0;
out_nomem:
        remove_cache_proc_entries(cd);
        return -ENOMEM;
}

void __init cache_initialize(void)
{
        INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
        int ret;

        sunrpc_init_cache_detail(cd);
        ret = create_cache_proc_entries(cd, net);
        if (ret)
                sunrpc_destroy_cache_detail(cd);
        return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
        remove_cache_proc_entries(cd);
        sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
        struct cache_detail *cd;
        int i;

        cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        cd->hash_table = kzalloc_objs(struct hlist_head, cd->hash_size);
        if (cd->hash_table == NULL) {
                kfree(cd);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < cd->hash_size; i++)
                INIT_HLIST_HEAD(&cd->hash_table[i]);
        cd->net = net;
        return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
        kfree(cd->hash_table);
        kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
        .owner          = THIS_MODULE,
        .read           = cache_read_pipefs,
        .write          = cache_write_pipefs,
        .poll           = cache_poll_pipefs,
        .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
        .open           = cache_open_pipefs,
        .release        = cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
        .open           = content_open_pipefs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
        .open           = open_flush_pipefs,
        .read           = read_flush_pipefs,
        .write          = write_flush_pipefs,
        .release        = release_flush_pipefs,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
                                 const char *name, umode_t umode,
                                 struct cache_detail *cd)
{
        struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);

        if (IS_ERR(dir))
                return PTR_ERR(dir);
        cd->pipefs = dir;
        return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
        if (cd->pipefs) {
                rpc_remove_cache_dir(cd->pipefs);
                cd->pipefs = NULL;
        }
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);

void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
        spin_lock(&cd->hash_lock);
        if (!hlist_unhashed(&h->cache_list)) {
                sunrpc_begin_cache_remove_entry(h, cd);
                spin_unlock(&cd->hash_lock);
                sunrpc_end_cache_remove_entry(h, cd);
        } else
                spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1900