/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define RPCDBG_FACILITY	RPCDBG_CACHE

static void cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	cache_init(new);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	detail->init(new, key);
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	return new;
}
EXPORT_SYMBOL(sunrpc_cache_lookup);
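
/*
 * A minimal sketch of how a cache might drive sunrpc_cache_lookup().
 * The ip_map structure, hash_ip() and IP_HASHMASK below are
 * hypothetical stand-ins for what a real cache (such as the svcauth
 * ip_map cache) would define; only the embedded cache_head and the
 * container_of() pattern are dictated by this file.
 *
 *	struct ip_map {
 *		struct cache_head h;
 *		struct in_addr addr;	// the key
 *		// ... cached data ...
 *	};
 *
 *	static struct ip_map *ip_map_lookup(struct cache_detail *cd,
 *					    struct in_addr addr)
 *	{
 *		struct ip_map key;
 *		struct cache_head *ch;
 *
 *		key.addr = addr;	// fill in only the key fields
 *		ch = sunrpc_cache_lookup(cd, &key.h,
 *					 hash_ip(addr) & IP_HASHMASK);
 *		if (!ch)
 *			return NULL;	// allocation failed
 *		return container_of(ch, struct ip_map, h);
 *	}
 *
 * The returned entry holds a reference and may still be !CACHE_VALID;
 * callers normally pass it to cache_check() before using the data.
 */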

static void queue_loose(struct cache_detail *detail, struct cache_head *ch);

static int cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	return !test_and_set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
			struct cache_detail *detail, int new)
{
	if (new)
		cache_revisit_request(head);
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		queue_loose(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head **head;
	struct cache_head *tmp;
	int is_new;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			is_new = cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail, is_new);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	cache_get(tmp);
	is_new = cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail, is_new);
	cache_fresh_unlocked(old, detail, 0);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL(sunrpc_cache_update);
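
/*
 * Sketch of the usual update pattern, again with hypothetical ip_map
 * names: fill a temporary entry with the new content and let
 * sunrpc_cache_update() either refresh 'old' in place or splice in a
 * replacement.  The caller's reference on 'old' is consumed; the
 * returned entry (or NULL on allocation failure) carries its own
 * reference.
 *
 *	static struct ip_map *ip_map_refresh(struct cache_detail *cd,
 *					     struct ip_map *old,
 *					     time_t expiry)
 *	{
 *		struct ip_map new;
 *		struct cache_head *ch;
 *
 *		new.h.flags = 0;
 *		// ... fill in the refreshed content fields ...
 *		new.h.expiry_time = expiry;
 *		ch = sunrpc_cache_update(cd, &new.h, &old->h,
 *					 hash_ip(old->addr) & IP_HASHMASK);
 *		if (!ch)
 *			return NULL;
 *		return container_of(ch, struct ip_map, h);
 *	}
 */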

static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used; otherwise it puts
 * the reference and returns
 * -EAGAIN if an upcall is pending,
 * -ENOENT if the cache entry was negative.
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		rv = -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		rv = -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			rv = -ENOENT;
		else
			rv = 0;
	}

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("Want update, refage=%ld, age=%ld\n", refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_unlocked(h, detail,
					     cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN)
		cache_defer_req(rqstp, h);

	if (rv)
		cache_put(h, detail);
	return rv;
}
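
/*
 * Typical caller pattern, assuming the hypothetical ip_map cache above
 * and a server thread whose request can be deferred; in the real nfsd
 * code the cache_req is the svc_rqst's rq_chandle, whose ->defer method
 * cache_defer_req() will call.
 *
 *	switch (cache_check(cd, &ipm->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		// valid: use ipm, then cache_put(&ipm->h, cd)
 *		break;
 *	case -EAGAIN:
 *		// upcall pending and request deferred; drop this
 *		// request, it will be revisited later.  Note that
 *		// cache_check() has already put the reference.
 *		break;
 *	case -ENOENT:
 *		// negative entry; reference already put
 *		break;
 *	}
 */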

/*
 * Caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before the current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * Later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'.
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
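
/*
 * A worked example with illustrative numbers: suppose a scan starts at
 * t=1000 and sets nextcheck to t=2800 (30 minutes ahead), and the
 * earliest expiry seen while scanning is t=1200.  nextcheck becomes
 * 1201, so the table will not be rescanned before then.  If user space
 * then writes to the flush file, write_flush() resets nextcheck to the
 * current time so the flush takes effect on the next cleaning pass.
 */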

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static struct file_operations cache_file_operations;
static struct file_operations content_file_operations;
static struct file_operations cache_flush_operations;

static void do_cache_clean(void *data);
static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);

void cache_register(struct cache_detail *cd)
{
	cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->proc_ent) {
		struct proc_dir_entry *p;
		cd->proc_ent->owner = cd->owner;
		cd->channel_ent = cd->content_ent = NULL;

		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
				      cd->proc_ent);
		cd->flush_ent = p;
		if (p) {
			p->proc_fops = &cache_flush_operations;
			p->owner = cd->owner;
			p->data = cd;
		}

		if (cd->cache_request || cd->cache_parse) {
			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->channel_ent = p;
			if (p) {
				p->proc_fops = &cache_file_operations;
				p->owner = cd->owner;
				p->data = cd;
			}
		}
		if (cd->cache_show) {
			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->content_ent = p;
			if (p) {
				p->proc_fops = &content_file_operations;
				p->owner = cd->owner;
				p->data = cd;
			}
		}
	}
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_work(&cache_cleaner);
}
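
/*
 * Sketch of a cache definition that might be handed to
 * cache_register().  All names here are hypothetical; the required
 * pieces are the hash table, hash_size, name, and whichever of the
 * alloc/match/init/update/cache_request/cache_parse/cache_show
 * methods the cache needs.
 *
 *	#define	IP_HASHSIZE 256
 *	static struct cache_head *ip_table[IP_HASHSIZE];
 *
 *	static struct cache_detail ip_map_cache = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= IP_HASHSIZE,
 *		.hash_table	= ip_table,
 *		.name		= "my.ipmap",
 *		.cache_put	= ip_map_put,
 *		.cache_request	= ip_map_request,
 *		.cache_parse	= ip_map_parse,
 *		.cache_show	= ip_map_show,
 *		.match		= ip_map_match,
 *		.init		= ip_map_init,
 *		.update		= ip_map_update_item,
 *		.alloc		= ip_map_alloc,
 *	};
 *
 *	cache_register(&ip_map_cache);	// e.g. from module init
 */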

int cache_unregister(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		return -EBUSY;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (cd->proc_ent) {
		if (cd->flush_ent)
			remove_proc_entry("flush", cd->proc_ent);
		if (cd->channel_ent)
			remove_proc_entry("channel", cd->proc_ent);
		if (cd->content_ent)
			remove_proc_entry("content", cd->proc_ent);

		cd->proc_ent = NULL;
		remove_proc_entry(cd->name, proc_net_rpc);
	}
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work(&cache_cleaner);
		flush_scheduled_work();
	}
	return 0;
}

/* cache_clean tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds() &&
			    ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				queue_loose(current_detail, ch);

			if (atomic_read(&ch->ref.refcount) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			cache_put(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(void *data)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = 30*HZ;

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
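
/*
 * Sketch of the deferred form a request owner might provide; all names
 * are hypothetical.  req->defer() returns a cache_deferred_req whose
 * ->revisit method is called later with too_many == 0 when the cache
 * item is resolved, or too_many == 1 when the deferral is dropped.
 *
 *	struct my_deferred_req {
 *		struct cache_deferred_req handle;
 *		// ... saved request state ...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq,
 *			       int too_many)
 *	{
 *		struct my_deferred_req *dr =
 *			container_of(dreq, struct my_deferred_req, handle);
 *		if (too_many)
 *			; // drop the saved request
 *		else
 *			; // requeue it for processing
 *	}
 *
 *	static struct cache_deferred_req *my_defer(struct cache_req *req)
 *	{
 *		struct my_deferred_req *dr; // allocate, copy request state
 *
 *		if (dr == NULL)
 *			return NULL;	// too costly to defer: just fail
 *		dr->handle.revisit = my_revisit;
 *		dr->handle.owner = THIS_MODULE; // matched by cache_clean_deferred()
 *		return &dr->handle;
 *	}
 */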

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	int hash = DFR_HASH(item);

	dreq = req->defer(req);
	if (dreq == NULL)
		return;

	dreq->item = item;
	dreq->recv_time = get_seconds();

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	dreq = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		/* too many deferred requests, randomly drop
		 * the first or the last
		 */
		if (net_random()&1)
			dreq = list_entry(cache_defer_list.next,
					  struct cache_deferred_req,
					  recent);
		else
			dreq = list_entry(cache_defer_list.prev,
					  struct cache_deferred_req,
					  recent);
		list_del(&dreq->recent);
		list_del(&dreq->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (dreq) {
		/* there was one too many */
		dreq->revisit(dreq, 1);
	}
	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
	}
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/cache.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static ssize_t
cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
				      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&queue_io_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&queue_io_mutex);
	return err ? err : count;
}

static char write_buf[8192]; /* protected by queue_io_mutex */

static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count,
	    loff_t *ppos)
{
	int err;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	if (count == 0)
		return 0;
	if (count >= sizeof(write_buf))
		return -EINVAL;

	mutex_lock(&queue_io_mutex);

	if (copy_from_user(write_buf, buf, count)) {
		mutex_unlock(&queue_io_mutex);
		return -EFAULT;
	}
	write_buf[count] = '\0';
	if (cd->cache_parse)
		err = cd->cache_parse(cd, write_buf, count);
	else
		err = -EINVAL;

	mutex_unlock(&queue_io_mutex);
	return err ? err : count;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int
cache_ioctl(struct inode *ino, struct file *filp,
	    unsigned int cmd, unsigned long arg)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(ino)->data;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int
cache_open(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = NULL;

	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		struct cache_detail *cd = PDE(inode)->data;

		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int
cache_release(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(inode)->data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	return 0;
}


static struct file_operations cache_file_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read,
	.write		= cache_write,
	.poll		= cache_poll,
	.ioctl		= cache_ioctl, /* for FIONREAD */
	.open		= cache_open,
	.release	= cache_release,
};

static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;

	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and
 * backslash with a backslash, or hexified with a leading \x.
 * Record is terminated with newline.
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0)
		return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
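
/*
 * Sketch of how an upcall formatter (a ->cache_request method) might
 * use these helpers; the ip_map names are hypothetical, as above.
 * 'len' goes negative if the buffer would overflow, which
 * cache_make_upcall() treats as failure.
 *
 *	static void ip_map_request(struct cache_detail *cd,
 *				   struct cache_head *h,
 *				   char **bpp, int *blen)
 *	{
 *		struct ip_map *ipm = container_of(h, struct ip_map, h);
 *
 *		qword_add(bpp, blen, "nfsd");	// class field, quoted
 *		qword_addhex(bpp, blen, (char *)&ipm->addr.s_addr, 4);
 *		(*bpp)[-1] = '\n';	// replace trailing space
 *	}
 */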

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail);
	}
}

/*
 * register an upcall request to user-space.
 * Each request is at most one page long.
 */
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (detail->cache_request == NULL)
		return -EINVAL;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	detail->cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}

/*
 * parse a message from user-space and pass it
 * to an appropriate cache.
 * Messages are, like requests, separated into fields by
 * spaces, and dequoted from \xHEXSTRING or embedded \nnn octal.
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by the cache.
 */
#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
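
/*
 * Sketch of a ->cache_parse method built on qword_get(); all names are
 * hypothetical.  Each call consumes one space-separated field from
 * 'mesg', dequoting as it goes; a return <= 0 means the field was
 * missing or malformed.
 *
 *	static int my_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char class[8], key[50];
 *
 *		if (qword_get(&mesg, class, sizeof(class)) <= 0)
 *			return -EINVAL;		// first field
 *		if (qword_get(&mesg, key, sizeof(key)) <= 0)
 *			return -EINVAL;		// second field
 *		// ... look up / update the cache entry from the fields ...
 *		return 0;
 *	}
 */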

/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file)
{
	int res;
	struct handle *han;
	struct cache_detail *cd = PDE(inode)->data;

	han = kmalloc(sizeof(*han), GFP_KERNEL);
	if (han == NULL)
		return -ENOMEM;

	han->cd = cd;

	res = seq_open(file, &cache_content_op);
	if (res)
		kfree(han);
	else
		((struct seq_file *)file->private_data)->private = han;

	return res;
}

static int content_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct handle *han = m->private;
	kfree(han);
	m->private = NULL;
	return seq_release(inode, file);
}

static struct file_operations content_file_operations = {
	.open		= content_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release,
};

static ssize_t read_flush(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	unsigned long p = *ppos;
	int len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		len = -EFAULT;
	else
		*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}
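
/*
 * The flush file is normally driven from user space.  Writing a time in
 * seconds sets flush_time, so entries last refreshed before that time
 * are discarded on the next cleaning pass.  A minimal user-space sketch
 * (the cache name "my.ipmap" and the lack of error handling are
 * hypothetical simplifications):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/net/rpc/my.ipmap/flush", O_WRONLY);
 *	char tbuf[20];
 *	int n = snprintf(tbuf, sizeof(tbuf), "%ld\n", (long)time(NULL));
 *	if (fd >= 0) {
 *		write(fd, tbuf, n);	// flush everything cached so far
 *		close(fd);
 *	}
 */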

static struct file_operations cache_flush_operations = {
	.open		= nonseekable_open,
	.read		= read_flush,
	.write		= write_flush,
};
1299