1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * net/sunrpc/cache.c
4 *
5 * Generic code for various authentication-related caches
6 * used by sunrpc clients and servers.
7 *
8 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
9 */
10
11 #include <linux/types.h>
12 #include <linux/fs.h>
13 #include <linux/file.h>
14 #include <linux/hex.h>
15 #include <linux/slab.h>
16 #include <linux/signal.h>
17 #include <linux/sched.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/ctype.h>
22 #include <linux/string_helpers.h>
23 #include <linux/uaccess.h>
24 #include <linux/poll.h>
25 #include <linux/seq_file.h>
26 #include <linux/proc_fs.h>
27 #include <linux/net.h>
28 #include <linux/workqueue.h>
29 #include <linux/mutex.h>
30 #include <linux/pagemap.h>
31 #include <asm/ioctls.h>
32 #include <linux/sunrpc/types.h>
33 #include <linux/sunrpc/cache.h>
34 #include <linux/sunrpc/stats.h>
35 #include <linux/sunrpc/rpc_pipe_fs.h>
36 #include <trace/events/sunrpc.h>
37
38 #include "netns.h"
39 #include "fail.h"
40
41 #define RPCDBG_FACILITY RPCDBG_CACHE
42
43 static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
44 static void cache_revisit_request(struct cache_head *item);
45
46 static void cache_init(struct cache_head *h, struct cache_detail *detail)
47 {
48 time64_t now = seconds_since_boot();
49 INIT_HLIST_NODE(&h->cache_list);
50 h->flags = 0;
51 kref_init(&h->ref);
52 h->expiry_time = now + CACHE_NEW_EXPIRY;
53 if (now <= detail->flush_time)
54 /* ensure it isn't already expired */
55 now = detail->flush_time + 1;
56 h->last_refresh = now;
57 }
58
59 static void cache_fresh_unlocked(struct cache_head *head,
60 struct cache_detail *detail);
61
62 static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
63 struct cache_head *key,
64 int hash)
65 {
66 struct hlist_head *head = &detail->hash_table[hash];
67 struct cache_head *tmp;
68
69 rcu_read_lock();
70 hlist_for_each_entry_rcu(tmp, head, cache_list) {
71 if (!detail->match(tmp, key))
72 continue;
73 if (test_bit(CACHE_VALID, &tmp->flags) &&
74 cache_is_expired(detail, tmp))
75 continue;
76 tmp = cache_get_rcu(tmp);
77 rcu_read_unlock();
78 return tmp;
79 }
80 rcu_read_unlock();
81 return NULL;
82 }
83
84 static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
85 struct cache_detail *cd)
86 {
87 /* Must be called under cd->hash_lock */
88 hlist_del_init_rcu(&ch->cache_list);
89 set_bit(CACHE_CLEANED, &ch->flags);
90 cd->entries--;
91 }
92
93 static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
94 struct cache_detail *cd)
95 {
96 cache_fresh_unlocked(ch, cd);
97 cache_put(ch, cd);
98 }
99
100 static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
101 struct cache_head *key,
102 int hash)
103 {
104 struct cache_head *new, *tmp, *freeme = NULL;
105 struct hlist_head *head = &detail->hash_table[hash];
106
107 new = detail->alloc();
108 if (!new)
109 return NULL;
110 /* must fully initialise 'new', else
111 * we might lose it if we need to
112 * cache_put it soon.
113 */
114 cache_init(new, detail);
115 detail->init(new, key);
116
117 spin_lock(&detail->hash_lock);
118
119 /* check if entry appeared while we slept */
120 hlist_for_each_entry_rcu(tmp, head, cache_list,
121 lockdep_is_held(&detail->hash_lock)) {
122 if (!detail->match(tmp, key))
123 continue;
124 if (test_bit(CACHE_VALID, &tmp->flags) &&
125 cache_is_expired(detail, tmp)) {
126 sunrpc_begin_cache_remove_entry(tmp, detail);
127 trace_cache_entry_expired(detail, tmp);
128 freeme = tmp;
129 break;
130 }
131 cache_get(tmp);
132 spin_unlock(&detail->hash_lock);
133 cache_put(new, detail);
134 return tmp;
135 }
136
137 hlist_add_head_rcu(&new->cache_list, head);
138 detail->entries++;
139 if (detail->nextcheck > new->expiry_time)
140 detail->nextcheck = new->expiry_time + 1;
141 cache_get(new);
142 spin_unlock(&detail->hash_lock);
143
144 if (freeme)
145 sunrpc_end_cache_remove_entry(freeme, detail);
146 return new;
147 }
148
149 struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
150 struct cache_head *key, int hash)
151 {
152 struct cache_head *ret;
153
154 ret = sunrpc_cache_find_rcu(detail, key, hash);
155 if (ret)
156 return ret;
157 /* Didn't find anything, insert an empty entry */
158 return sunrpc_cache_add_entry(detail, key, hash);
159 }
160 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
161
162 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
163
164 static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
165 struct cache_detail *detail)
166 {
167 time64_t now = seconds_since_boot();
168 if (now <= detail->flush_time)
169 /* ensure it isn't immediately treated as expired */
170 now = detail->flush_time + 1;
171 head->expiry_time = expiry;
172 head->last_refresh = now;
173 smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
174 set_bit(CACHE_VALID, &head->flags);
175 }
176
177 static void cache_fresh_unlocked(struct cache_head *head,
178 struct cache_detail *detail)
179 {
180 if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
181 cache_revisit_request(head);
182 cache_dequeue(detail, head);
183 }
184 }
185
186 static void cache_make_negative(struct cache_detail *detail,
187 struct cache_head *h)
188 {
189 set_bit(CACHE_NEGATIVE, &h->flags);
190 trace_cache_entry_make_negative(detail, h);
191 }
192
193 static void cache_entry_update(struct cache_detail *detail,
194 struct cache_head *h,
195 struct cache_head *new)
196 {
197 if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
198 detail->update(h, new);
199 trace_cache_entry_update(detail, h);
200 } else {
201 cache_make_negative(detail, h);
202 }
203 }
204
205 struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
206 struct cache_head *new, struct cache_head *old, int hash)
207 {
208 /* The 'old' entry is to be replaced by 'new'.
209 * If 'old' is not VALID, we update it directly,
210 * otherwise we need to replace it
211 */
212 struct cache_head *tmp;
213
214 if (!test_bit(CACHE_VALID, &old->flags)) {
215 spin_lock(&detail->hash_lock);
216 if (!test_bit(CACHE_VALID, &old->flags)) {
217 cache_entry_update(detail, old, new);
218 cache_fresh_locked(old, new->expiry_time, detail);
219 spin_unlock(&detail->hash_lock);
220 cache_fresh_unlocked(old, detail);
221 return old;
222 }
223 spin_unlock(&detail->hash_lock);
224 }
225 /* We need to insert a new entry */
226 tmp = detail->alloc();
227 if (!tmp) {
228 cache_put(old, detail);
229 return NULL;
230 }
231 cache_init(tmp, detail);
232 detail->init(tmp, old);
233
234 spin_lock(&detail->hash_lock);
235 cache_entry_update(detail, tmp, new);
236 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
237 detail->entries++;
238 cache_get(tmp);
239 cache_fresh_locked(tmp, new->expiry_time, detail);
240 cache_fresh_locked(old, 0, detail);
241 spin_unlock(&detail->hash_lock);
242 cache_fresh_unlocked(tmp, detail);
243 cache_fresh_unlocked(old, detail);
244 cache_put(old, detail);
245 return tmp;
246 }
247 EXPORT_SYMBOL_GPL(sunrpc_cache_update);
248
249 static inline int cache_is_valid(struct cache_head *h)
250 {
251 if (!test_bit(CACHE_VALID, &h->flags))
252 return -EAGAIN;
253 else {
254 /* entry is valid */
255 if (test_bit(CACHE_NEGATIVE, &h->flags))
256 return -ENOENT;
257 else {
258 /*
259 * In combination with write barrier in
260 * sunrpc_cache_update, ensures that anyone
261 * using the cache entry after this sees the
262 * updated contents:
263 */
264 smp_rmb();
265 return 0;
266 }
267 }
268 }
269
270 static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
271 {
272 int rv;
273
274 spin_lock(&detail->hash_lock);
275 rv = cache_is_valid(h);
276 if (rv == -EAGAIN) {
277 cache_make_negative(detail, h);
278 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
279 detail);
280 rv = -ENOENT;
281 }
282 spin_unlock(&detail->hash_lock);
283 cache_fresh_unlocked(h, detail);
284 return rv;
285 }
286
287 int cache_check_rcu(struct cache_detail *detail,
288 struct cache_head *h, struct cache_req *rqstp)
289 {
290 int rv;
291 time64_t refresh_age, age;
292
293 /* First decide return status as best we can */
294 rv = cache_is_valid(h);
295
296 /* now see if we want to start an upcall */
297 refresh_age = (h->expiry_time - h->last_refresh);
298 age = seconds_since_boot() - h->last_refresh;
299
300 if (rqstp == NULL) {
301 if (rv == -EAGAIN)
302 rv = -ENOENT;
303 } else if (rv == -EAGAIN ||
304 (h->expiry_time != 0 && age > refresh_age/2)) {
305 dprintk("RPC: Want update, refage=%lld, age=%lld\n",
306 refresh_age, age);
307 switch (detail->cache_upcall(detail, h)) {
308 case -EINVAL:
309 rv = try_to_negate_entry(detail, h);
310 break;
311 case -EAGAIN:
312 cache_fresh_unlocked(h, detail);
313 break;
314 }
315 }
316
317 if (rv == -EAGAIN) {
318 if (!cache_defer_req(rqstp, h)) {
319 /*
320 * Request was not deferred; handle it as best
321 * we can ourselves:
322 */
323 rv = cache_is_valid(h);
324 if (rv == -EAGAIN)
325 rv = -ETIMEDOUT;
326 }
327 }
328
329 return rv;
330 }
331 EXPORT_SYMBOL_GPL(cache_check_rcu);
332
333 /*
334 * This is the generic cache management routine for all
335 * the authentication caches.
336 * It checks the currency of a cache item and will (later)
337 * initiate an upcall to fill it if needed.
338 *
339 *
340 * Returns 0 if the cache_head can be used, or cache_puts it and returns
341 * -EAGAIN if upcall is pending and request has been queued
342 * -ETIMEDOUT if upcall failed or request could not be queued or
343 * upcall completed but item is still invalid (implying that
344 * the cache item has been replaced with a newer one).
345 * -ENOENT if cache entry was negative
346 */
347 int cache_check(struct cache_detail *detail,
348 struct cache_head *h, struct cache_req *rqstp)
349 {
350 int rv;
351
352 rv = cache_check_rcu(detail, h, rqstp);
353 if (rv)
354 cache_put(h, detail);
355 return rv;
356 }
357 EXPORT_SYMBOL_GPL(cache_check);
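
/*
 * Illustrative sketch only (the names "my_cache", "my_key" and "my_hash"
 * are assumptions for the example, not part of this file): the calling
 * pattern a cache user typically follows, with the cache_req taken from
 * the server request (e.g. &rqstp->rq_chandle).  Note that for any
 * non-zero return cache_check() has already dropped the reference.
 *
 *	struct cache_head *h;
 *
 *	h = sunrpc_cache_lookup_rcu(&my_cache, &my_key.h, my_hash(&my_key));
 *	if (h == NULL)
 *		return -ENOMEM;
 *	switch (cache_check(&my_cache, h, &rqstp->rq_chandle)) {
 *	case 0:          use the entry, then cache_put() it
 *	case -EAGAIN:    upcall pending, request deferred or queued
 *	case -ENOENT:    negative entry
 *	case -ETIMEDOUT: upcall failed or could not be queued
 *	}
 */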
358
359 /*
360 * caches need to be periodically cleaned.
361 * For this we maintain a list of cache_detail and
362 * a current pointer into that list and into the table
363 * for that entry.
364 *
365 * Each time cache_clean is called it finds the next non-empty entry
366 * in the current table and walks the list in that entry
367 * looking for entries that can be removed.
368 *
369 * An entry gets removed if:
370 * - The expiry is before current time
371 * - The last_refresh time is before the flush_time for that cache
372 *
373 * later we might drop old entries with non-NEVER expiry if that table
374 * is getting 'full' for some definition of 'full'
375 *
376 * The question of "how often to scan a table" is an interesting one
377 * and is answered in part by the use of the "nextcheck" field in the
378 * cache_detail.
379 * When a scan of a table begins, the nextcheck field is set to a time
380 * that is well into the future.
381 * While scanning, if an expiry time is found that is earlier than the
382 * current nextcheck time, nextcheck is set to that expiry time.
383 * If the flush_time is ever set to a time earlier than the nextcheck
384 * time, the nextcheck time is then set to that flush_time.
385 *
386 * A table is then only scanned if the current time is at least
387 * the nextcheck time.
388 *
389 */
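
/*
 * Worked example of the above (derived from cache_clean() and
 * write_flush() below): if a scan at time T finds an earliest expiry of
 * T+100, nextcheck ends up no later than T+101 and the table is skipped
 * on every subsequent pass until seconds_since_boot() reaches that value.
 * A write to the "flush" file then pulls nextcheck back to "now", so the
 * very next pass rescans the table.
 */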
390
391 static LIST_HEAD(cache_list);
392 static DEFINE_SPINLOCK(cache_list_lock);
393 static struct cache_detail *current_detail;
394 static int current_index;
395
396 static void do_cache_clean(struct work_struct *work);
397 static struct delayed_work cache_cleaner;
398
399 void sunrpc_init_cache_detail(struct cache_detail *cd)
400 {
401 spin_lock_init(&cd->hash_lock);
402 INIT_LIST_HEAD(&cd->queue);
403 spin_lock(&cache_list_lock);
404 cd->nextcheck = 0;
405 cd->entries = 0;
406 atomic_set(&cd->writers, 0);
407 cd->last_close = 0;
408 cd->last_warn = -1;
409 list_add(&cd->others, &cache_list);
410 spin_unlock(&cache_list_lock);
411
412 /* start the cleaning process */
413 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
414 }
415 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
416
417 void sunrpc_destroy_cache_detail(struct cache_detail *cd)
418 {
419 cache_purge(cd);
420 spin_lock(&cache_list_lock);
421 spin_lock(&cd->hash_lock);
422 if (current_detail == cd)
423 current_detail = NULL;
424 list_del_init(&cd->others);
425 spin_unlock(&cd->hash_lock);
426 spin_unlock(&cache_list_lock);
427 if (list_empty(&cache_list)) {
428 /* module must be being unloaded, so it's safe to kill the worker */
429 cancel_delayed_work_sync(&cache_cleaner);
430 }
431 }
432 EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
433
434 /* clean cache tries to find something to clean
435 * and cleans it.
436 * It returns 1 if it cleaned something,
437 * 0 if it didn't find anything this time
438 * -1 if it fell off the end of the list.
439 */
440 static int cache_clean(void)
441 {
442 int rv = 0;
443 struct list_head *next;
444
445 spin_lock(&cache_list_lock);
446
447 /* find a suitable table if we don't already have one */
448 while (current_detail == NULL ||
449 current_index >= current_detail->hash_size) {
450 if (current_detail)
451 next = current_detail->others.next;
452 else
453 next = cache_list.next;
454 if (next == &cache_list) {
455 current_detail = NULL;
456 spin_unlock(&cache_list_lock);
457 return -1;
458 }
459 current_detail = list_entry(next, struct cache_detail, others);
460 if (current_detail->nextcheck > seconds_since_boot())
461 current_index = current_detail->hash_size;
462 else {
463 current_index = 0;
464 current_detail->nextcheck = seconds_since_boot()+30*60;
465 }
466 }
467
468 spin_lock(&current_detail->hash_lock);
469
470 /* find a non-empty bucket in the table */
471 while (current_index < current_detail->hash_size &&
472 hlist_empty(&current_detail->hash_table[current_index]))
473 current_index++;
474
475 /* find a cleanable entry in the bucket and clean it, or set to next bucket */
476 if (current_index < current_detail->hash_size) {
477 struct cache_head *ch = NULL;
478 struct cache_detail *d;
479 struct hlist_head *head;
480 struct hlist_node *tmp;
481
482 /* Ok, now to clean this strand */
483 head = &current_detail->hash_table[current_index];
484 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
485 if (current_detail->nextcheck > ch->expiry_time)
486 current_detail->nextcheck = ch->expiry_time+1;
487 if (!cache_is_expired(current_detail, ch))
488 continue;
489
490 sunrpc_begin_cache_remove_entry(ch, current_detail);
491 trace_cache_entry_expired(current_detail, ch);
492 rv = 1;
493 break;
494 }
495
496 spin_unlock(&current_detail->hash_lock);
497 d = current_detail;
498 if (!ch)
499 current_index++;
500 spin_unlock(&cache_list_lock);
501 if (ch)
502 sunrpc_end_cache_remove_entry(ch, d);
503 } else {
504 spin_unlock(&current_detail->hash_lock);
505 spin_unlock(&cache_list_lock);
506 }
507
508 return rv;
509 }
510
511 /*
512 * We want to regularly clean the cache, so we need to schedule some work ...
513 */
514 static void do_cache_clean(struct work_struct *work)
515 {
516 int delay;
517
518 if (list_empty(&cache_list))
519 return;
520
521 if (cache_clean() == -1)
522 delay = round_jiffies_relative(30*HZ);
523 else
524 delay = 5;
525
526 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
527 }
528
529
530 /*
531 * Clean all caches promptly. This just calls cache_clean
532 * repeatedly until we are sure that every cache has had a chance to
533 * be fully cleaned
534 */
535 void cache_flush(void)
536 {
537 while (cache_clean() != -1)
538 cond_resched();
539 while (cache_clean() != -1)
540 cond_resched();
541 }
542 EXPORT_SYMBOL_GPL(cache_flush);
543
544 void cache_purge(struct cache_detail *detail)
545 {
546 struct cache_head *ch = NULL;
547 struct hlist_head *head = NULL;
548 int i = 0;
549
550 spin_lock(&detail->hash_lock);
551 if (!detail->entries) {
552 spin_unlock(&detail->hash_lock);
553 return;
554 }
555
556 dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
557 for (i = 0; i < detail->hash_size; i++) {
558 head = &detail->hash_table[i];
559 while (!hlist_empty(head)) {
560 ch = hlist_entry(head->first, struct cache_head,
561 cache_list);
562 sunrpc_begin_cache_remove_entry(ch, detail);
563 spin_unlock(&detail->hash_lock);
564 sunrpc_end_cache_remove_entry(ch, detail);
565 spin_lock(&detail->hash_lock);
566 }
567 }
568 spin_unlock(&detail->hash_lock);
569 }
570 EXPORT_SYMBOL_GPL(cache_purge);
571
572
573 /*
574 * Deferral and Revisiting of Requests.
575 *
576 * If a cache lookup finds a pending entry, we
577 * need to defer the request and revisit it later.
578 * All deferred requests are stored in a hash table,
579 * indexed by "struct cache_head *".
580 * As it may be wasteful to store a whole request
581 * structure, we allow the request to provide a
582 * deferred form, which must contain a
583 * 'struct cache_deferred_req'
584 * This cache_deferred_req contains a method to allow
585 * it to be revisited when cache info is available
586 */
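
/*
 * Illustrative example (the name "my_deferred_req" is made up): a caller
 * embeds the handle in its own deferred form and points ->revisit at a
 * callback,
 *
 *	struct my_deferred_req {
 *		struct cache_deferred_req handle;
 *		(saved request state)
 *	};
 *
 * cache_defer_req() only stores the handle; when the cache entry is
 * filled in, or the deferral is discarded, ->revisit(dreq, too_many) is
 * called and the owner uses container_of() to recover its full request,
 * exactly as cache_wait_req()/cache_restart_thread() below do with
 * struct thread_deferred_req.
 */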
587
588 #define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
589 #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
590
591 #define DFR_MAX 300 /* ??? */
592
593 static DEFINE_SPINLOCK(cache_defer_lock);
594 static LIST_HEAD(cache_defer_list);
595 static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
596 static int cache_defer_cnt;
597
598 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
599 {
600 hlist_del_init(&dreq->hash);
601 if (!list_empty(&dreq->recent)) {
602 list_del_init(&dreq->recent);
603 cache_defer_cnt--;
604 }
605 }
606
607 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
608 {
609 int hash = DFR_HASH(item);
610
611 INIT_LIST_HEAD(&dreq->recent);
612 hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
613 }
614
615 static void setup_deferral(struct cache_deferred_req *dreq,
616 struct cache_head *item,
617 int count_me)
618 {
619
620 dreq->item = item;
621
622 spin_lock(&cache_defer_lock);
623
624 __hash_deferred_req(dreq, item);
625
626 if (count_me) {
627 cache_defer_cnt++;
628 list_add(&dreq->recent, &cache_defer_list);
629 }
630
631 spin_unlock(&cache_defer_lock);
632
633 }
634
635 struct thread_deferred_req {
636 struct cache_deferred_req handle;
637 struct completion completion;
638 };
639
640 static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
641 {
642 struct thread_deferred_req *dr =
643 container_of(dreq, struct thread_deferred_req, handle);
644 complete(&dr->completion);
645 }
646
647 static void cache_wait_req(struct cache_req *req, struct cache_head *item)
648 {
649 struct thread_deferred_req sleeper;
650 struct cache_deferred_req *dreq = &sleeper.handle;
651
652 sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
653 dreq->revisit = cache_restart_thread;
654
655 setup_deferral(dreq, item, 0);
656
657 if (!test_bit(CACHE_PENDING, &item->flags) ||
658 wait_for_completion_interruptible_timeout(
659 &sleeper.completion, req->thread_wait) <= 0) {
660 /* The completion wasn't completed, so we need
661 * to clean up
662 */
663 spin_lock(&cache_defer_lock);
664 if (!hlist_unhashed(&sleeper.handle.hash)) {
665 __unhash_deferred_req(&sleeper.handle);
666 spin_unlock(&cache_defer_lock);
667 } else {
668 /* cache_revisit_request already removed
669 * this from the hash table, but hasn't
670 * called ->revisit yet. It will very soon
671 * and we need to wait for it.
672 */
673 spin_unlock(&cache_defer_lock);
674 wait_for_completion(&sleeper.completion);
675 }
676 }
677 }
678
679 static void cache_limit_defers(void)
680 {
681 /* Make sure we haven't exceeded the limit of allowed deferred
682 * requests.
683 */
684 struct cache_deferred_req *discard = NULL;
685
686 if (cache_defer_cnt <= DFR_MAX)
687 return;
688
689 spin_lock(&cache_defer_lock);
690
691 /* Consider removing either the first or the last */
692 if (cache_defer_cnt > DFR_MAX) {
693 if (get_random_u32_below(2))
694 discard = list_entry(cache_defer_list.next,
695 struct cache_deferred_req, recent);
696 else
697 discard = list_entry(cache_defer_list.prev,
698 struct cache_deferred_req, recent);
699 __unhash_deferred_req(discard);
700 }
701 spin_unlock(&cache_defer_lock);
702 if (discard)
703 discard->revisit(discard, 1);
704 }
705
706 #if IS_ENABLED(CONFIG_FAIL_SUNRPC)
707 static inline bool cache_defer_immediately(void)
708 {
709 return !fail_sunrpc.ignore_cache_wait &&
710 should_fail(&fail_sunrpc.attr, 1);
711 }
712 #else
713 static inline bool cache_defer_immediately(void)
714 {
715 return false;
716 }
717 #endif
718
719 /* Return true if and only if a deferred request is queued. */
720 static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
721 {
722 struct cache_deferred_req *dreq;
723
724 if (!cache_defer_immediately()) {
725 cache_wait_req(req, item);
726 if (!test_bit(CACHE_PENDING, &item->flags))
727 return false;
728 }
729
730 dreq = req->defer(req);
731 if (dreq == NULL)
732 return false;
733 setup_deferral(dreq, item, 1);
734 if (!test_bit(CACHE_PENDING, &item->flags))
735 /* Bit could have been cleared before we managed to
736 * set up the deferral, so need to revisit just in case
737 */
738 cache_revisit_request(item);
739
740 cache_limit_defers();
741 return true;
742 }
743
744 static void cache_revisit_request(struct cache_head *item)
745 {
746 struct cache_deferred_req *dreq;
747 struct hlist_node *tmp;
748 int hash = DFR_HASH(item);
749 LIST_HEAD(pending);
750
751 spin_lock(&cache_defer_lock);
752
753 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
754 if (dreq->item == item) {
755 __unhash_deferred_req(dreq);
756 list_add(&dreq->recent, &pending);
757 }
758
759 spin_unlock(&cache_defer_lock);
760
761 while (!list_empty(&pending)) {
762 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
763 list_del_init(&dreq->recent);
764 dreq->revisit(dreq, 0);
765 }
766 }
767
768 void cache_clean_deferred(void *owner)
769 {
770 struct cache_deferred_req *dreq, *tmp;
771 LIST_HEAD(pending);
772
773 spin_lock(&cache_defer_lock);
774
775 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
776 if (dreq->owner == owner) {
777 __unhash_deferred_req(dreq);
778 list_add(&dreq->recent, &pending);
779 }
780 }
781 spin_unlock(&cache_defer_lock);
782
783 while (!list_empty(&pending)) {
784 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
785 list_del_init(&dreq->recent);
786 dreq->revisit(dreq, 1);
787 }
788 }
789
790 /*
791 * communicate with user-space
792 *
793 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
794 * On read, you get a full request, or block.
795 * On write, an update request is processed.
796 * Poll works if anything to read, and always allows write.
797 *
798 * Implemented by a linked list of requests. Each open file has
799 * a ->private that also exists in this list. New requests are added
800 * to the end and may wake up any preceding readers.
801 * New readers are added to the head. If, on read, an item is found with
802 * CACHE_UPCALLING clear, we free it from the list.
803 *
804 */
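
/*
 * Example of the resulting userspace loop (a sketch, not a normative
 * protocol description): a daemon opens the channel file read-write,
 * poll()s it, reads one request per read() (at most PAGE_SIZE bytes,
 * formatted by ->cache_request), resolves it, and writes a single reply
 * line back, which is handed to ->cache_parse as the downcall.
 */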
805
806 static DEFINE_SPINLOCK(queue_lock);
807
808 struct cache_queue {
809 struct list_head list;
810 int reader; /* if 0, then request */
811 };
812 struct cache_request {
813 struct cache_queue q;
814 struct cache_head *item;
815 char * buf;
816 int len;
817 int readers;
818 };
819 struct cache_reader {
820 struct cache_queue q;
821 int offset; /* if non-0, we have a refcnt on next request */
822 };
823
824 static int cache_request(struct cache_detail *detail,
825 struct cache_request *crq)
826 {
827 char *bp = crq->buf;
828 int len = PAGE_SIZE;
829
830 detail->cache_request(detail, crq->item, &bp, &len);
831 if (len < 0)
832 return -E2BIG;
833 return PAGE_SIZE - len;
834 }
835
836 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
837 loff_t *ppos, struct cache_detail *cd)
838 {
839 struct cache_reader *rp = filp->private_data;
840 struct cache_request *rq;
841 struct inode *inode = file_inode(filp);
842 int err;
843
844 if (count == 0)
845 return 0;
846
847 inode_lock(inode); /* protect against multiple concurrent
848 * readers on this file */
849 again:
850 spin_lock(&queue_lock);
851 /* need to find next request */
852 while (rp->q.list.next != &cd->queue &&
853 list_entry(rp->q.list.next, struct cache_queue, list)
854 ->reader) {
855 struct list_head *next = rp->q.list.next;
856 list_move(&rp->q.list, next);
857 }
858 if (rp->q.list.next == &cd->queue) {
859 spin_unlock(&queue_lock);
860 inode_unlock(inode);
861 WARN_ON_ONCE(rp->offset);
862 return 0;
863 }
864 rq = container_of(rp->q.list.next, struct cache_request, q.list);
865 WARN_ON_ONCE(rq->q.reader);
866 if (rp->offset == 0)
867 rq->readers++;
868 spin_unlock(&queue_lock);
869
870 if (rq->len == 0) {
871 err = cache_request(cd, rq);
872 if (err < 0)
873 goto out;
874 rq->len = err;
875 }
876
877 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
878 err = -EAGAIN;
879 spin_lock(&queue_lock);
880 list_move(&rp->q.list, &rq->q.list);
881 spin_unlock(&queue_lock);
882 } else {
883 if (rp->offset + count > rq->len)
884 count = rq->len - rp->offset;
885 err = -EFAULT;
886 if (copy_to_user(buf, rq->buf + rp->offset, count))
887 goto out;
888 rp->offset += count;
889 if (rp->offset >= rq->len) {
890 rp->offset = 0;
891 spin_lock(&queue_lock);
892 list_move(&rp->q.list, &rq->q.list);
893 spin_unlock(&queue_lock);
894 }
895 err = 0;
896 }
897 out:
898 if (rp->offset == 0) {
899 /* need to release rq */
900 spin_lock(&queue_lock);
901 rq->readers--;
902 if (rq->readers == 0 &&
903 !test_bit(CACHE_PENDING, &rq->item->flags)) {
904 list_del(&rq->q.list);
905 spin_unlock(&queue_lock);
906 cache_put(rq->item, cd);
907 kfree(rq->buf);
908 kfree(rq);
909 } else
910 spin_unlock(&queue_lock);
911 }
912 if (err == -EAGAIN)
913 goto again;
914 inode_unlock(inode);
915 return err ? err : count;
916 }
917
918 static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
919 size_t count, struct cache_detail *cd)
920 {
921 ssize_t ret;
922
923 if (count == 0)
924 return -EINVAL;
925 if (copy_from_user(kaddr, buf, count))
926 return -EFAULT;
927 kaddr[count] = '\0';
928 ret = cd->cache_parse(cd, kaddr, count);
929 if (!ret)
930 ret = count;
931 return ret;
932 }
933
934 static ssize_t cache_downcall(struct address_space *mapping,
935 const char __user *buf,
936 size_t count, struct cache_detail *cd)
937 {
938 char *write_buf;
939 ssize_t ret = -ENOMEM;
940
941 if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
942 ret = -EINVAL;
943 goto out;
944 }
945
946 write_buf = kvmalloc(count + 1, GFP_KERNEL);
947 if (!write_buf)
948 goto out;
949
950 ret = cache_do_downcall(write_buf, buf, count, cd);
951 kvfree(write_buf);
952 out:
953 return ret;
954 }
955
956 static ssize_t cache_write(struct file *filp, const char __user *buf,
957 size_t count, loff_t *ppos,
958 struct cache_detail *cd)
959 {
960 struct address_space *mapping = filp->f_mapping;
961 struct inode *inode = file_inode(filp);
962 ssize_t ret = -EINVAL;
963
964 if (!cd->cache_parse)
965 goto out;
966
967 inode_lock(inode);
968 ret = cache_downcall(mapping, buf, count, cd);
969 inode_unlock(inode);
970 out:
971 return ret;
972 }
973
974 static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
975
976 static __poll_t cache_poll(struct file *filp, poll_table *wait,
977 struct cache_detail *cd)
978 {
979 __poll_t mask;
980 struct cache_reader *rp = filp->private_data;
981 struct cache_queue *cq;
982
983 poll_wait(filp, &queue_wait, wait);
984
985 /* always allow write */
986 mask = EPOLLOUT | EPOLLWRNORM;
987
988 if (!rp)
989 return mask;
990
991 spin_lock(&queue_lock);
992
993 for (cq= &rp->q; &cq->list != &cd->queue;
994 cq = list_entry(cq->list.next, struct cache_queue, list))
995 if (!cq->reader) {
996 mask |= EPOLLIN | EPOLLRDNORM;
997 break;
998 }
999 spin_unlock(&queue_lock);
1000 return mask;
1001 }
1002
1003 static int cache_ioctl(struct inode *ino, struct file *filp,
1004 unsigned int cmd, unsigned long arg,
1005 struct cache_detail *cd)
1006 {
1007 int len = 0;
1008 struct cache_reader *rp = filp->private_data;
1009 struct cache_queue *cq;
1010
1011 if (cmd != FIONREAD || !rp)
1012 return -EINVAL;
1013
1014 spin_lock(&queue_lock);
1015
1016 /* only find the length remaining in current request,
1017 * or the length of the next request
1018 */
1019 for (cq= &rp->q; &cq->list != &cd->queue;
1020 cq = list_entry(cq->list.next, struct cache_queue, list))
1021 if (!cq->reader) {
1022 struct cache_request *cr =
1023 container_of(cq, struct cache_request, q);
1024 len = cr->len - rp->offset;
1025 break;
1026 }
1027 spin_unlock(&queue_lock);
1028
1029 return put_user(len, (int __user *)arg);
1030 }
1031
1032 static int cache_open(struct inode *inode, struct file *filp,
1033 struct cache_detail *cd)
1034 {
1035 struct cache_reader *rp = NULL;
1036
1037 if (!cd || !try_module_get(cd->owner))
1038 return -EACCES;
1039 nonseekable_open(inode, filp);
1040 if (filp->f_mode & FMODE_READ) {
1041 rp = kmalloc_obj(*rp, GFP_KERNEL);
1042 if (!rp) {
1043 module_put(cd->owner);
1044 return -ENOMEM;
1045 }
1046 rp->offset = 0;
1047 rp->q.reader = 1;
1048
1049 spin_lock(&queue_lock);
1050 list_add(&rp->q.list, &cd->queue);
1051 spin_unlock(&queue_lock);
1052 }
1053 if (filp->f_mode & FMODE_WRITE)
1054 atomic_inc(&cd->writers);
1055 filp->private_data = rp;
1056 return 0;
1057 }
1058
1059 static int cache_release(struct inode *inode, struct file *filp,
1060 struct cache_detail *cd)
1061 {
1062 struct cache_reader *rp = filp->private_data;
1063
1064 if (rp) {
1065 spin_lock(&queue_lock);
1066 if (rp->offset) {
1067 struct cache_queue *cq;
1068 for (cq= &rp->q; &cq->list != &cd->queue;
1069 cq = list_entry(cq->list.next, struct cache_queue, list))
1070 if (!cq->reader) {
1071 container_of(cq, struct cache_request, q)
1072 ->readers--;
1073 break;
1074 }
1075 rp->offset = 0;
1076 }
1077 list_del(&rp->q.list);
1078 spin_unlock(&queue_lock);
1079
1080 filp->private_data = NULL;
1081 kfree(rp);
1082
1083 }
1084 if (filp->f_mode & FMODE_WRITE) {
1085 atomic_dec(&cd->writers);
1086 cd->last_close = seconds_since_boot();
1087 }
1088 module_put(cd->owner);
1089 return 0;
1090 }
1091
1092
1093
1094 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1095 {
1096 struct cache_queue *cq, *tmp;
1097 struct cache_request *cr;
1098 LIST_HEAD(dequeued);
1099
1100 spin_lock(&queue_lock);
1101 list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1102 if (!cq->reader) {
1103 cr = container_of(cq, struct cache_request, q);
1104 if (cr->item != ch)
1105 continue;
1106 if (test_bit(CACHE_PENDING, &ch->flags))
1107 /* Lost a race and it is pending again */
1108 break;
1109 if (cr->readers != 0)
1110 continue;
1111 list_move(&cr->q.list, &dequeued);
1112 }
1113 spin_unlock(&queue_lock);
1114 while (!list_empty(&dequeued)) {
1115 cr = list_entry(dequeued.next, struct cache_request, q.list);
1116 list_del(&cr->q.list);
1117 cache_put(cr->item, detail);
1118 kfree(cr->buf);
1119 kfree(cr);
1120 }
1121 }
1122
1123 /*
1124 * Support routines for text-based upcalls.
1125 * Fields are separated by spaces.
1126 * Fields are either mangled to quote space, tab, newline and slosh with a slosh,
1127 * or hexified with a leading \x
1128 * Record is terminated with newline.
1129 *
1130 */
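
/*
 * For example (derived from the helpers below): qword_add() with the
 * string "a b" appends "a\040b " - the space is octal-escaped and a
 * single space separator is added - while qword_addhex() with the two
 * bytes 0x01 0x02 appends "\x0102 ".  On overflow both helpers set *lp
 * to -1 so the caller can detect a truncated record.
 */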
1131
1132 void qword_add(char **bpp, int *lp, char *str)
1133 {
1134 char *bp = *bpp;
1135 int len = *lp;
1136 int ret;
1137
1138 if (len < 0) return;
1139
1140 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1141 if (ret >= len) {
1142 bp += len;
1143 len = -1;
1144 } else {
1145 bp += ret;
1146 len -= ret;
1147 *bp++ = ' ';
1148 len--;
1149 }
1150 *bpp = bp;
1151 *lp = len;
1152 }
1153 EXPORT_SYMBOL_GPL(qword_add);
1154
1155 void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1156 {
1157 char *bp = *bpp;
1158 int len = *lp;
1159
1160 if (len < 0) return;
1161
1162 if (len > 2) {
1163 *bp++ = '\\';
1164 *bp++ = 'x';
1165 len -= 2;
1166 while (blen && len >= 2) {
1167 bp = hex_byte_pack(bp, *buf++);
1168 len -= 2;
1169 blen--;
1170 }
1171 }
1172 if (blen || len<1) len = -1;
1173 else {
1174 *bp++ = ' ';
1175 len--;
1176 }
1177 *bpp = bp;
1178 *lp = len;
1179 }
1180 EXPORT_SYMBOL_GPL(qword_addhex);
1181
1182 static void warn_no_listener(struct cache_detail *detail)
1183 {
1184 if (detail->last_warn != detail->last_close) {
1185 detail->last_warn = detail->last_close;
1186 if (detail->warn_no_listener)
1187 detail->warn_no_listener(detail, detail->last_close != 0);
1188 }
1189 }
1190
1191 static bool cache_listeners_exist(struct cache_detail *detail)
1192 {
1193 if (atomic_read(&detail->writers))
1194 return true;
1195 if (detail->last_close == 0)
1196 /* This cache was never opened */
1197 return false;
1198 if (detail->last_close < seconds_since_boot() - 30)
1199 /*
1200 * We allow for the possibility that someone might
1201 * restart a userspace daemon without restarting the
1202 * server; but after 30 seconds, we give up.
1203 */
1204 return false;
1205 return true;
1206 }
1207
1208 /*
1209 * register an upcall request to user-space and queue it up for read() by the
1210 * upcall daemon.
1211 *
1212 * Each request is at most one page long.
1213 */
1214 static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1215 {
1216 char *buf;
1217 struct cache_request *crq;
1218 int ret = 0;
1219
1220 if (test_bit(CACHE_CLEANED, &h->flags))
1221 /* Too late to make an upcall */
1222 return -EAGAIN;
1223
1224 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1225 if (!buf)
1226 return -EAGAIN;
1227
1228 crq = kmalloc_obj(*crq, GFP_KERNEL);
1229 if (!crq) {
1230 kfree(buf);
1231 return -EAGAIN;
1232 }
1233
1234 crq->q.reader = 0;
1235 crq->buf = buf;
1236 crq->len = 0;
1237 crq->readers = 0;
1238 spin_lock(&queue_lock);
1239 if (test_bit(CACHE_PENDING, &h->flags)) {
1240 crq->item = cache_get(h);
1241 list_add_tail(&crq->q.list, &detail->queue);
1242 trace_cache_entry_upcall(detail, h);
1243 } else
1244 /* Lost a race, no longer PENDING, so don't enqueue */
1245 ret = -EAGAIN;
1246 spin_unlock(&queue_lock);
1247 wake_up(&queue_wait);
1248 if (ret == -EAGAIN) {
1249 kfree(buf);
1250 kfree(crq);
1251 }
1252 return ret;
1253 }
1254
1255 int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1256 {
1257 if (test_and_set_bit(CACHE_PENDING, &h->flags))
1258 return 0;
1259 return cache_pipe_upcall(detail, h);
1260 }
1261 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1262
1263 int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
1264 struct cache_head *h)
1265 {
1266 if (!cache_listeners_exist(detail)) {
1267 warn_no_listener(detail);
1268 trace_cache_entry_no_listener(detail, h);
1269 return -EINVAL;
1270 }
1271 return sunrpc_cache_pipe_upcall(detail, h);
1272 }
1273 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
1274
1275 /*
1276 * parse a message from user-space and pass it
1277 * to an appropriate cache
1278 * Messages are, like requests, separated into fields by
1279 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
1280 *
1281 * Message is
1282 * reply cachename expiry key ... content....
1283 *
1284 * key and content are both parsed by cache
1285 */
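
/*
 * Parsing example (illustrative, matching qword_get() below): given the
 * buffer "\x0102 foo\040bar\n", the first qword_get() call returns 2 and
 * stores the raw bytes 0x01 0x02, the second returns 7 and stores
 * "foo bar" with the octal escape decoded, and a further call at the
 * newline returns 0 with an empty string.
 */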
1286
1287 int qword_get(char **bpp, char *dest, int bufsize)
1288 {
1289 /* return bytes copied, or -1 on error */
1290 char *bp = *bpp;
1291 int len = 0;
1292
1293 while (*bp == ' ') bp++;
1294
1295 if (bp[0] == '\\' && bp[1] == 'x') {
1296 /* HEX STRING */
1297 bp += 2;
1298 while (len < bufsize - 1) {
1299 int h, l;
1300
1301 h = hex_to_bin(bp[0]);
1302 if (h < 0)
1303 break;
1304
1305 l = hex_to_bin(bp[1]);
1306 if (l < 0)
1307 break;
1308
1309 *dest++ = (h << 4) | l;
1310 bp += 2;
1311 len++;
1312 }
1313 } else {
1314 /* text with \nnn octal quoting */
1315 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1316 if (*bp == '\\' &&
1317 isodigit(bp[1]) && (bp[1] <= '3') &&
1318 isodigit(bp[2]) &&
1319 isodigit(bp[3])) {
1320 int byte = (*++bp -'0');
1321 bp++;
1322 byte = (byte << 3) | (*bp++ - '0');
1323 byte = (byte << 3) | (*bp++ - '0');
1324 *dest++ = byte;
1325 len++;
1326 } else {
1327 *dest++ = *bp++;
1328 len++;
1329 }
1330 }
1331 }
1332
1333 if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1334 return -1;
1335 while (*bp == ' ') bp++;
1336 *bpp = bp;
1337 *dest = '\0';
1338 return len;
1339 }
1340 EXPORT_SYMBOL_GPL(qword_get);
1341
1342
1343 /*
1344 * support /proc/net/rpc/$CACHENAME/content
1345 * as a seqfile.
1346 * We call ->cache_show passing NULL for the item to
1347 * get a header, then pass each real item in the cache
1348 */
1349
1350 static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1351 {
1352 loff_t n = *pos;
1353 unsigned int hash, entry;
1354 struct cache_head *ch;
1355 struct cache_detail *cd = m->private;
1356
1357 if (!n--)
1358 return SEQ_START_TOKEN;
1359 hash = n >> 32;
1360 entry = n & ((1LL<<32) - 1);
1361
1362 hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1363 if (!entry--)
1364 return ch;
1365 n &= ~((1LL<<32) - 1);
1366 do {
1367 hash++;
1368 n += 1LL<<32;
1369 } while(hash < cd->hash_size &&
1370 hlist_empty(&cd->hash_table[hash]));
1371 if (hash >= cd->hash_size)
1372 return NULL;
1373 *pos = n+1;
1374 return hlist_entry_safe(rcu_dereference_raw(
1375 hlist_first_rcu(&cd->hash_table[hash])),
1376 struct cache_head, cache_list);
1377 }
1378
1379 static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1380 {
1381 struct cache_head *ch = p;
1382 int hash = (*pos >> 32);
1383 struct cache_detail *cd = m->private;
1384
1385 if (p == SEQ_START_TOKEN)
1386 hash = 0;
1387 else if (ch->cache_list.next == NULL) {
1388 hash++;
1389 *pos += 1LL<<32;
1390 } else {
1391 ++*pos;
1392 return hlist_entry_safe(rcu_dereference_raw(
1393 hlist_next_rcu(&ch->cache_list)),
1394 struct cache_head, cache_list);
1395 }
1396 *pos &= ~((1LL<<32) - 1);
1397 while (hash < cd->hash_size &&
1398 hlist_empty(&cd->hash_table[hash])) {
1399 hash++;
1400 *pos += 1LL<<32;
1401 }
1402 if (hash >= cd->hash_size)
1403 return NULL;
1404 ++*pos;
1405 return hlist_entry_safe(rcu_dereference_raw(
1406 hlist_first_rcu(&cd->hash_table[hash])),
1407 struct cache_head, cache_list);
1408 }
1409
1410 void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1411 __acquires(RCU)
1412 {
1413 rcu_read_lock();
1414 return __cache_seq_start(m, pos);
1415 }
1416 EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1417
1418 void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1419 {
1420 return cache_seq_next(file, p, pos);
1421 }
1422 EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1423
1424 void cache_seq_stop_rcu(struct seq_file *m, void *p)
1425 __releases(RCU)
1426 {
1427 rcu_read_unlock();
1428 }
1429 EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1430
1431 static int c_show(struct seq_file *m, void *p)
1432 {
1433 struct cache_head *cp = p;
1434 struct cache_detail *cd = m->private;
1435
1436 if (p == SEQ_START_TOKEN)
1437 return cd->cache_show(m, cd, NULL);
1438
1439 ifdebug(CACHE)
1440 seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
1441 convert_to_wallclock(cp->expiry_time),
1442 kref_read(&cp->ref), cp->flags);
1443
1444 if (cache_check_rcu(cd, cp, NULL))
1445 seq_puts(m, "# ");
1446 else if (cache_is_expired(cd, cp))
1447 seq_puts(m, "# ");
1448
1449 return cd->cache_show(m, cd, cp);
1450 }
1451
1452 static const struct seq_operations cache_content_op = {
1453 .start = cache_seq_start_rcu,
1454 .next = cache_seq_next_rcu,
1455 .stop = cache_seq_stop_rcu,
1456 .show = c_show,
1457 };
1458
1459 static int content_open(struct inode *inode, struct file *file,
1460 struct cache_detail *cd)
1461 {
1462 struct seq_file *seq;
1463 int err;
1464
1465 if (!cd || !try_module_get(cd->owner))
1466 return -EACCES;
1467
1468 err = seq_open(file, &cache_content_op);
1469 if (err) {
1470 module_put(cd->owner);
1471 return err;
1472 }
1473
1474 seq = file->private_data;
1475 seq->private = cd;
1476 return 0;
1477 }
1478
1479 static int content_release(struct inode *inode, struct file *file,
1480 struct cache_detail *cd)
1481 {
1482 int ret = seq_release(inode, file);
1483 module_put(cd->owner);
1484 return ret;
1485 }
1486
1487 static int open_flush(struct inode *inode, struct file *file,
1488 struct cache_detail *cd)
1489 {
1490 if (!cd || !try_module_get(cd->owner))
1491 return -EACCES;
1492 return nonseekable_open(inode, file);
1493 }
1494
1495 static int release_flush(struct inode *inode, struct file *file,
1496 struct cache_detail *cd)
1497 {
1498 module_put(cd->owner);
1499 return 0;
1500 }
1501
1502 static ssize_t read_flush(struct file *file, char __user *buf,
1503 size_t count, loff_t *ppos,
1504 struct cache_detail *cd)
1505 {
1506 char tbuf[22];
1507 size_t len;
1508
1509 len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
1510 convert_to_wallclock(cd->flush_time));
1511 return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1512 }
1513
1514 static ssize_t write_flush(struct file *file, const char __user *buf,
1515 size_t count, loff_t *ppos,
1516 struct cache_detail *cd)
1517 {
1518 char tbuf[20];
1519 char *ep;
1520 time64_t now;
1521
1522 if (*ppos || count > sizeof(tbuf)-1)
1523 return -EINVAL;
1524 if (copy_from_user(tbuf, buf, count))
1525 return -EFAULT;
1526 tbuf[count] = 0;
1527 simple_strtoul(tbuf, &ep, 0);
1528 if (*ep && *ep != '\n')
1529 return -EINVAL;
1530 /* Note that while we check that 'buf' holds a valid number,
1531 * we always ignore the value and just flush everything.
1532 * Making use of the number leads to races.
1533 */
1534
1535 now = seconds_since_boot();
1536 /* Always flush everything, so behave like cache_purge()
1537 * Do this by advancing flush_time to the current time,
1538 * or by one second if it has already reached the current time.
1539 * Newly added cache entries will always have ->last_refresh greater
1540 * than ->flush_time, so they don't get flushed prematurely.
1541 */
1542
1543 if (cd->flush_time >= now)
1544 now = cd->flush_time + 1;
1545
1546 cd->flush_time = now;
1547 cd->nextcheck = now;
1548 cache_flush();
1549
1550 if (cd->flush)
1551 cd->flush();
1552
1553 *ppos += count;
1554 return count;
1555 }
1556
1557 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1558 size_t count, loff_t *ppos)
1559 {
1560 struct cache_detail *cd = pde_data(file_inode(filp));
1561
1562 return cache_read(filp, buf, count, ppos, cd);
1563 }
1564
1565 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1566 size_t count, loff_t *ppos)
1567 {
1568 struct cache_detail *cd = pde_data(file_inode(filp));
1569
1570 return cache_write(filp, buf, count, ppos, cd);
1571 }
1572
1573 static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1574 {
1575 struct cache_detail *cd = pde_data(file_inode(filp));
1576
1577 return cache_poll(filp, wait, cd);
1578 }
1579
1580 static long cache_ioctl_procfs(struct file *filp,
1581 unsigned int cmd, unsigned long arg)
1582 {
1583 struct inode *inode = file_inode(filp);
1584 struct cache_detail *cd = pde_data(inode);
1585
1586 return cache_ioctl(inode, filp, cmd, arg, cd);
1587 }
1588
1589 static int cache_open_procfs(struct inode *inode, struct file *filp)
1590 {
1591 struct cache_detail *cd = pde_data(inode);
1592
1593 return cache_open(inode, filp, cd);
1594 }
1595
1596 static int cache_release_procfs(struct inode *inode, struct file *filp)
1597 {
1598 struct cache_detail *cd = pde_data(inode);
1599
1600 return cache_release(inode, filp, cd);
1601 }
1602
1603 static const struct proc_ops cache_channel_proc_ops = {
1604 .proc_read = cache_read_procfs,
1605 .proc_write = cache_write_procfs,
1606 .proc_poll = cache_poll_procfs,
1607 .proc_ioctl = cache_ioctl_procfs, /* for FIONREAD */
1608 .proc_open = cache_open_procfs,
1609 .proc_release = cache_release_procfs,
1610 };
1611
1612 static int content_open_procfs(struct inode *inode, struct file *filp)
1613 {
1614 struct cache_detail *cd = pde_data(inode);
1615
1616 return content_open(inode, filp, cd);
1617 }
1618
1619 static int content_release_procfs(struct inode *inode, struct file *filp)
1620 {
1621 struct cache_detail *cd = pde_data(inode);
1622
1623 return content_release(inode, filp, cd);
1624 }
1625
1626 static const struct proc_ops content_proc_ops = {
1627 .proc_open = content_open_procfs,
1628 .proc_read = seq_read,
1629 .proc_lseek = seq_lseek,
1630 .proc_release = content_release_procfs,
1631 };
1632
1633 static int open_flush_procfs(struct inode *inode, struct file *filp)
1634 {
1635 struct cache_detail *cd = pde_data(inode);
1636
1637 return open_flush(inode, filp, cd);
1638 }
1639
1640 static int release_flush_procfs(struct inode *inode, struct file *filp)
1641 {
1642 struct cache_detail *cd = pde_data(inode);
1643
1644 return release_flush(inode, filp, cd);
1645 }
1646
1647 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1648 size_t count, loff_t *ppos)
1649 {
1650 struct cache_detail *cd = pde_data(file_inode(filp));
1651
1652 return read_flush(filp, buf, count, ppos, cd);
1653 }
1654
1655 static ssize_t write_flush_procfs(struct file *filp,
1656 const char __user *buf,
1657 size_t count, loff_t *ppos)
1658 {
1659 struct cache_detail *cd = pde_data(file_inode(filp));
1660
1661 return write_flush(filp, buf, count, ppos, cd);
1662 }
1663
1664 static const struct proc_ops cache_flush_proc_ops = {
1665 .proc_open = open_flush_procfs,
1666 .proc_read = read_flush_procfs,
1667 .proc_write = write_flush_procfs,
1668 .proc_release = release_flush_procfs,
1669 };
1670
1671 static void remove_cache_proc_entries(struct cache_detail *cd)
1672 {
1673 if (cd->procfs) {
1674 proc_remove(cd->procfs);
1675 cd->procfs = NULL;
1676 }
1677 }
1678
1679 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1680 {
1681 struct proc_dir_entry *p;
1682 struct sunrpc_net *sn;
1683
1684 if (!IS_ENABLED(CONFIG_PROC_FS))
1685 return 0;
1686
1687 sn = net_generic(net, sunrpc_net_id);
1688 cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1689 if (cd->procfs == NULL)
1690 goto out_nomem;
1691
1692 p = proc_create_data("flush", S_IFREG | 0600,
1693 cd->procfs, &cache_flush_proc_ops, cd);
1694 if (p == NULL)
1695 goto out_nomem;
1696
1697 if (cd->cache_request || cd->cache_parse) {
1698 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1699 &cache_channel_proc_ops, cd);
1700 if (p == NULL)
1701 goto out_nomem;
1702 }
1703 if (cd->cache_show) {
1704 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1705 &content_proc_ops, cd);
1706 if (p == NULL)
1707 goto out_nomem;
1708 }
1709 return 0;
1710 out_nomem:
1711 remove_cache_proc_entries(cd);
1712 return -ENOMEM;
1713 }
1714
1715 void __init cache_initialize(void)
1716 {
1717 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1718 }
1719
1720 int cache_register_net(struct cache_detail *cd, struct net *net)
1721 {
1722 int ret;
1723
1724 sunrpc_init_cache_detail(cd);
1725 ret = create_cache_proc_entries(cd, net);
1726 if (ret)
1727 sunrpc_destroy_cache_detail(cd);
1728 return ret;
1729 }
1730 EXPORT_SYMBOL_GPL(cache_register_net);
1731
1732 void cache_unregister_net(struct cache_detail *cd, struct net *net)
1733 {
1734 remove_cache_proc_entries(cd);
1735 sunrpc_destroy_cache_detail(cd);
1736 }
1737 EXPORT_SYMBOL_GPL(cache_unregister_net);
1738
1739 struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1740 {
1741 struct cache_detail *cd;
1742 int i;
1743
1744 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1745 if (cd == NULL)
1746 return ERR_PTR(-ENOMEM);
1747
1748 cd->hash_table = kzalloc_objs(struct hlist_head, cd->hash_size,
1749 GFP_KERNEL);
1750 if (cd->hash_table == NULL) {
1751 kfree(cd);
1752 return ERR_PTR(-ENOMEM);
1753 }
1754
1755 for (i = 0; i < cd->hash_size; i++)
1756 INIT_HLIST_HEAD(&cd->hash_table[i]);
1757 cd->net = net;
1758 return cd;
1759 }
1760 EXPORT_SYMBOL_GPL(cache_create_net);
1761
1762 void cache_destroy_net(struct cache_detail *cd, struct net *net)
1763 {
1764 kfree(cd->hash_table);
1765 kfree(cd);
1766 }
1767 EXPORT_SYMBOL_GPL(cache_destroy_net);
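
/*
 * Typical per-net lifecycle (a summary of the helpers above, not a new
 * interface): a template cache_detail is cloned with cache_create_net(),
 * made visible with cache_register_net() - which also creates the procfs
 * "flush", "channel" and "content" files - and torn down in the reverse
 * order with cache_unregister_net() followed by cache_destroy_net().
 */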
1768
1769 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1770 size_t count, loff_t *ppos)
1771 {
1772 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1773
1774 return cache_read(filp, buf, count, ppos, cd);
1775 }
1776
1777 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1778 size_t count, loff_t *ppos)
1779 {
1780 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1781
1782 return cache_write(filp, buf, count, ppos, cd);
1783 }
1784
1785 static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1786 {
1787 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1788
1789 return cache_poll(filp, wait, cd);
1790 }
1791
1792 static long cache_ioctl_pipefs(struct file *filp,
1793 unsigned int cmd, unsigned long arg)
1794 {
1795 struct inode *inode = file_inode(filp);
1796 struct cache_detail *cd = RPC_I(inode)->private;
1797
1798 return cache_ioctl(inode, filp, cmd, arg, cd);
1799 }
1800
1801 static int cache_open_pipefs(struct inode *inode, struct file *filp)
1802 {
1803 struct cache_detail *cd = RPC_I(inode)->private;
1804
1805 return cache_open(inode, filp, cd);
1806 }
1807
1808 static int cache_release_pipefs(struct inode *inode, struct file *filp)
1809 {
1810 struct cache_detail *cd = RPC_I(inode)->private;
1811
1812 return cache_release(inode, filp, cd);
1813 }
1814
1815 const struct file_operations cache_file_operations_pipefs = {
1816 .owner = THIS_MODULE,
1817 .read = cache_read_pipefs,
1818 .write = cache_write_pipefs,
1819 .poll = cache_poll_pipefs,
1820 .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1821 .open = cache_open_pipefs,
1822 .release = cache_release_pipefs,
1823 };
1824
1825 static int content_open_pipefs(struct inode *inode, struct file *filp)
1826 {
1827 struct cache_detail *cd = RPC_I(inode)->private;
1828
1829 return content_open(inode, filp, cd);
1830 }
1831
1832 static int content_release_pipefs(struct inode *inode, struct file *filp)
1833 {
1834 struct cache_detail *cd = RPC_I(inode)->private;
1835
1836 return content_release(inode, filp, cd);
1837 }
1838
1839 const struct file_operations content_file_operations_pipefs = {
1840 .open = content_open_pipefs,
1841 .read = seq_read,
1842 .llseek = seq_lseek,
1843 .release = content_release_pipefs,
1844 };
1845
1846 static int open_flush_pipefs(struct inode *inode, struct file *filp)
1847 {
1848 struct cache_detail *cd = RPC_I(inode)->private;
1849
1850 return open_flush(inode, filp, cd);
1851 }
1852
1853 static int release_flush_pipefs(struct inode *inode, struct file *filp)
1854 {
1855 struct cache_detail *cd = RPC_I(inode)->private;
1856
1857 return release_flush(inode, filp, cd);
1858 }
1859
1860 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1861 size_t count, loff_t *ppos)
1862 {
1863 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1864
1865 return read_flush(filp, buf, count, ppos, cd);
1866 }
1867
1868 static ssize_t write_flush_pipefs(struct file *filp,
1869 const char __user *buf,
1870 size_t count, loff_t *ppos)
1871 {
1872 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1873
1874 return write_flush(filp, buf, count, ppos, cd);
1875 }
1876
1877 const struct file_operations cache_flush_operations_pipefs = {
1878 .open = open_flush_pipefs,
1879 .read = read_flush_pipefs,
1880 .write = write_flush_pipefs,
1881 .release = release_flush_pipefs,
1882 };
1883
1884 int sunrpc_cache_register_pipefs(struct dentry *parent,
1885 const char *name, umode_t umode,
1886 struct cache_detail *cd)
1887 {
1888 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1889 if (IS_ERR(dir))
1890 return PTR_ERR(dir);
1891 cd->pipefs = dir;
1892 return 0;
1893 }
1894 EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1895
1896 void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1897 {
1898 if (cd->pipefs) {
1899 rpc_remove_cache_dir(cd->pipefs);
1900 cd->pipefs = NULL;
1901 }
1902 }
1903 EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1904
1905 void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1906 {
1907 spin_lock(&cd->hash_lock);
1908 if (!hlist_unhashed(&h->cache_list)){
1909 sunrpc_begin_cache_remove_entry(h, cd);
1910 spin_unlock(&cd->hash_lock);
1911 sunrpc_end_cache_remove_entry(h, cd);
1912 } else
1913 spin_unlock(&cd->hash_lock);
1914 }
1915 EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1916