xref: /linux/fs/nfsd/nfscache.c (revision 0cac60c776a6bd15fbadc1c6c5c079b9a0c39634)
// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct kmem_cache	*drc_slab;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough maximum of the amount of
 * memory used, in kilobytes.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with the number of containers.  Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
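
/*
 * A worked example of the formula above, for illustration (assuming
 * 4K pages, i.e. PAGE_SHIFT == 12): with 1GB of low memory, low_pages
 * is 262144, int_sqrt(262144) is 512, and (16 * 512) << (12 - 10)
 * yields the 32768 entries shown in the table.
 */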

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to the next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
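
/*
 * For illustration, continuing the 1GB sizing example above: a limit
 * of 32768 entries gives 32768 / TARGET_BUCKET_SIZE = 512, already a
 * power of two, so the table gets 512 buckets and nfsd_reply_cache_init()
 * sets nn->maskbits to ilog2(512) == 9.
 */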

static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
			struct nfsd_net *nn)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
				struct nfsd_net *nn)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
		kfree(rp->c_replvec.iov_base);
	}
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&nn->num_drc_entries);
		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
	}
	kmem_cache_free(drc_slab, rp);
}
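
/*
 * Note that an entry that was never linked into the bucket
 * (c_state == RC_UNUSED) is handled here too: the rb_erase/list_del
 * branch is skipped entirely, which is why callers may pass b == NULL
 * for such entries (as nfsd_cache_lookup() does on a cache hit).
 */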

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
			struct nfsd_net *nn)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(b, rp, nn);
	spin_unlock(&b->cache_lock);
}

int nfsd_drc_slab_create(void)
{
	drc_slab = kmem_cache_create("nfsd_drc",
				sizeof(struct svc_cacherep), 0, 0, NULL);
	return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
	kmem_cache_destroy(drc_slab);
}

static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
{
	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}

static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
{
	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
	hashsize = nfsd_hashsize(nn->max_drc_entries);
	nn->maskbits = ilog2(hashsize);

	status = nfsd_reply_cache_stats_init(nn);
	if (status)
		goto out_nomem;

	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker.seeks = 1;
	status = register_shrinker(&nn->nfsd_reply_cache_shrinker,
				   "nfsd-reply:%s", nn->nfsd_name);
	if (status)
		goto out_stats_destroy;

	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
	if (!nn->drc_hashtbl)
		goto out_shrinker;

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
	}
	nn->drc_hashsize = hashsize;

	return 0;
out_shrinker:
	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_stats_destroy:
	nfsd_reply_cache_stats_destroy(nn);
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct list_head *head = &nn->drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
									rp, nn);
		}
	}
	nfsd_reply_cache_stats_destroy(nn);

	kvfree(nn->drc_hashtbl);
	nn->drc_hashtbl = NULL;
	nn->drc_hashsize = 0;
}

/*
 * Move a cache entry to the end of the LRU list and refresh its timestamp.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
	unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

	return &nn->drc_hashtbl[hash];
}
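
/*
 * hash_32() folds the full 32-bit XID down to nn->maskbits bits, so
 * the result always lands in [0, drc_hashsize). With the 512-bucket
 * example above, an XID selects one of buckets 0..511.
 */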

static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
			 unsigned int max)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(b, rp, nn);
		if (max && freed++ > max)
			break;
	}
	return freed;
}

static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
	return prune_bucket(b, nn, 3);
}
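
/*
 * The cap of 3 limits each inline call from nfsd_cache_lookup() to
 * freeing a few entries, presumably to keep work in the RPC receive
 * path short; bulk reclaim is left to the shrinker, which reaches
 * prune_bucket() with max == 0 (no cap) via prune_cache_entries().
 */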

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b, nn, 0);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and compute a checksum over at most the first
 * RC_CSUMLEN bytes.
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
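
/*
 * Example of the walk above, assuming RC_CSUMLEN is 256 (its value in
 * cache.h at the time of writing): for a request with a 120-byte head
 * and 4096 bytes of page data, csum_len is min(120 + 4096, 256) = 256,
 * so csum_partial() covers the 120 head bytes and then 136 bytes from
 * the start of the page array.
 */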

static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
			const struct svc_cacherep *rp, struct nfsd_net *nn)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum) {
		nfsd_stats_payload_misses_inc(nn);
		trace_nfsd_drc_mismatch(nn, key, rp);
	}

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry, or
 * inserts the given key into the tree and returns that when no match
 * is found.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
			struct nfsd_net *nn)
{
	struct svc_cacherep	*rp, *ret = key;
	struct rb_node		**p = &b->rb_head.rb_node,
				*parent = NULL;
	unsigned int		entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct svc_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > nn->longest_chain) {
		nn->longest_chain = entries;
		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
	} else if (entries == nn->longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		nn->longest_chain_cachesize = min_t(unsigned int,
				nn->longest_chain_cachesize,
				atomic_read(&nn->num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}
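
/*
 * nfsd_cache_insert() thus doubles as a lookup: when no entry matches,
 * the caller's preallocated key is linked into the tree and returned,
 * and nfsd_cache_lookup() below distinguishes a miss from a hit by
 * comparing the returned pointer against the key it passed in.
 */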

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a miss followed by an insert, a new entry is
 * preallocated before the bucket lock is taken; nfsd_cache_insert()
 * then either returns a matching entry or links the new one into place.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct nfsd_net		*nn;
	struct svc_cacherep	*rp, *found;
	__wsum			csum;
	struct nfsd_drc_bucket	*b;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsd_stats_rc_nocache_inc();
		goto out;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
	if (!rp)
		goto out;

	b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp)
		goto found_entry;

	nfsd_stats_rc_misses_inc();
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;

	atomic_inc(&nn->num_drc_entries);
	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));

	nfsd_prune_bucket(b, nn);

out_unlock:
	spin_unlock(&b->cache_lock);
out:
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsd_reply_cache_free_locked(NULL, rp, nn);
	nfsd_stats_rc_hits_inc();
	rtn = RC_DROPIT;
	rp = found;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out_trace;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out_trace;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out_unlock; /* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
	}

out_trace:
	trace_nfsd_drc_found(nn, rqstp, rtn);
	goto out_unlock;
}
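
/*
 * Lifecycle sketch: entries are born RC_UNUSED in nfsd_reply_cache_alloc(),
 * move to RC_INPROG once nfsd_cache_lookup() claims them for an incoming
 * call, and become RC_DONE when nfsd_cache_update() stores the reply.
 * Only non-RC_INPROG entries are eligible for pruning above.
 */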

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: Reply's status code
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data or XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	spin_lock(&b->cache_lock);
	nfsd_stats_drc_mem_usage_add(nn, bufsize);
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
}
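
/*
 * Note on the 256-byte cap above: len counts 32-bit XDR words from the
 * status word to the end of the reply head, so only replies whose
 * cacheable payload fits in 256 bytes can be stored as RC_REPLBUFF;
 * anything larger is freed and simply not cached.
 */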

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
					  nfsd_net_id);

	seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
	seq_printf(m, "num entries:           %u\n",
		   atomic_read(&nn->num_drc_entries));
	seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
	seq_printf(m, "mem usage:             %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
	seq_printf(m, "cache hits:            %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
	seq_printf(m, "cache misses:          %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
	seq_printf(m, "not cached:            %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
	seq_printf(m, "payload misses:        %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
	seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
	return 0;
}