// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct kmem_cache	*drc_slab;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
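
/*
 * Worked example for the table above, assuming 4KB pages (PAGE_SHIFT
 * of 12, so the shift below is by 2): a machine with 1GB of low memory
 * has 262144 low pages, int_sqrt(262144) = 512, and so
 * limit = (16 * 512) << 2 = 32768 entries, matching the 1GB row.
 */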

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

static struct nfsd_cacherep *
nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
		    struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	kmem_cache_free(drc_slab, rp);
}

static unsigned long
nfsd_cacherep_dispose(struct list_head *dispose)
{
	struct nfsd_cacherep *rp;
	unsigned long freed = 0;

	while (!list_empty(dispose)) {
		rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
		list_del(&rp->c_lru);
		nfsd_cacherep_free(rp);
		freed++;
	}
	return freed;
}

static void
nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			    struct nfsd_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&nn->num_drc_entries);
		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
	}
}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
			     struct nfsd_net *nn)
{
	nfsd_cacherep_unlink_locked(nn, b, rp);
	nfsd_cacherep_free(rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
		      struct nfsd_net *nn)
{
	spin_lock(&b->cache_lock);
	nfsd_cacherep_unlink_locked(nn, b, rp);
	spin_unlock(&b->cache_lock);
	nfsd_cacherep_free(rp);
}
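
/*
 * A sketch of how the helpers above are meant to be combined (this
 * mirrors the callers later in this file): entries are unlinked under
 * the bucket lock and collected on a private list, then freed outside
 * the lock via nfsd_cacherep_dispose() to keep spinlock hold times
 * short.
 *
 *	LIST_HEAD(dispose);
 *
 *	spin_lock(&b->cache_lock);
 *	nfsd_cacherep_unlink_locked(nn, b, rp);
 *	list_add(&rp->c_lru, &dispose);
 *	spin_unlock(&b->cache_lock);
 *	nfsd_cacherep_dispose(&dispose);
 */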

int nfsd_drc_slab_create(void)
{
	drc_slab = kmem_cache_create("nfsd_drc",
				sizeof(struct nfsd_cacherep), 0, 0, NULL);
	return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
	kmem_cache_destroy(drc_slab);
}

/**
 * nfsd_net_reply_cache_init - per net namespace reply cache set-up
 * @nn: nfsd_net being initialized
 *
 * Returns zero on success; otherwise a negative errno is returned.
 */
int nfsd_net_reply_cache_init(struct nfsd_net *nn)
{
	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}

/**
 * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
 * @nn: nfsd_net being freed
 */
void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
{
	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
	unsigned int hashsize;
	unsigned int i;

	nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
	hashsize = nfsd_hashsize(nn->max_drc_entries);
	nn->maskbits = ilog2(hashsize);

	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
	if (!nn->drc_hashtbl)
		return -ENOMEM;

	nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
						       nn->nfsd_name);
	if (!nn->nfsd_reply_cache_shrinker)
		goto out_shrinker;

	nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker->seeks = 1;
	nn->nfsd_reply_cache_shrinker->private_data = nn;

	shrinker_register(nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
	}
	nn->drc_hashsize = hashsize;

	return 0;
out_shrinker:
	kvfree(nn->drc_hashtbl);
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp;
	unsigned int i;

	shrinker_free(nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct list_head *head = &nn->drc_hashtbl[i].lru_head;

		while (!list_empty(head)) {
			rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
						     rp, nn);
		}
	}

	kvfree(nn->drc_hashtbl);
	nn->drc_hashtbl = NULL;
	nn->drc_hashsize = 0;
}

/*
 * Move a cache entry to the end of its bucket's LRU list.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
	unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

	return &nn->drc_hashtbl[hash];
}
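
/*
 * Bucket-selection example, assuming the default sizing worked out
 * above: with max_drc_entries = 32768, nfsd_hashsize() yields
 * 32768 / 64 = 512 buckets and maskbits = ilog2(512) = 9. hash_32()
 * folds the 32-bit XID down to a 9-bit index, so every retransmission
 * of a given XID maps to the same bucket (and thus the same rbtree
 * and LRU list).
 */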

/*
 * Remove and return no more than @max expired entries in bucket @b.
 * If @max is zero, do not limit the number of removed entries.
 */
static void
nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			 unsigned int max, struct list_head *dispose)
{
	unsigned long expiry = jiffies - RC_EXPIRE;
	struct nfsd_cacherep *rp, *tmp;
	unsigned int freed = 0;

	lockdep_assert_held(&b->cache_lock);

	/* The bucket LRU is ordered oldest-first. */
	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;

		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(expiry, rp->c_timestamp))
			break;

		nfsd_cacherep_unlink_locked(nn, b, rp);
		list_add(&rp->c_lru, dispose);

		if (max && ++freed > max)
			break;
	}
}

/**
 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Returns the total number of entries in the duplicate reply cache. To
 * keep things simple and quick, this is not the number of expired entries
 * in the cache (ie, the number that would be removed by a call to
 * nfsd_reply_cache_scan).
 */
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = shrink->private_data;

	return atomic_read(&nn->num_drc_entries);
}

/**
 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Free entries on each bucket's LRU list until we've released
 * nr_to_scan objects. Only entries that have expired, or that are in
 * excess of the cache's max_drc_entries limit, are freed.
 *
 * Returns the number of entries released by this call.
 */
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = shrink->private_data;
	unsigned long freed = 0;
	LIST_HEAD(dispose);
	unsigned int i;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;

		spin_lock(&b->cache_lock);
		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
		spin_unlock(&b->cache_lock);

		freed += nfsd_cacherep_dispose(&dispose);
		if (freed > sc->nr_to_scan)
			break;
	}
	return freed;
}
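
/*
 * Shrinker interaction, in outline: under memory pressure the VM first
 * calls ->count_objects() to gauge the cache size, then calls
 * ->scan_objects() with sc->nr_to_scan set. Because
 * nfsd_prune_bucket_locked() only removes entries that have expired or
 * that exceed max_drc_entries, a cache that is within bounds and holds
 * no expired entries reports its size via count but frees nothing in
 * scan.
 */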

/**
 * nfsd_cache_csum - Checksum incoming NFS Call arguments
 * @buf: buffer containing a whole RPC Call message
 * @start: starting byte of the NFS Call header
 * @remaining: size of the NFS Call header, in bytes
 *
 * Compute a weak checksum of the leading bytes of an NFS procedure
 * call header to help verify that a retransmitted Call matches an
 * entry in the duplicate reply cache.
 *
 * To avoid assumptions about how the RPC message is laid out in
 * @buf and what else it might contain (eg, a GSS MIC suffix), the
 * caller passes us the exact location and length of the NFS Call
 * header.
 *
 * Returns a 32-bit checksum value, as defined in RFC 793.
 */
static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
			      unsigned int remaining)
{
	unsigned int base, len;
	struct xdr_buf subbuf;
	__wsum csum = 0;
	void *p;
	int idx;

	if (remaining > RC_CSUMLEN)
		remaining = RC_CSUMLEN;
	if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
		return csum;

	/* rq_arg.head first */
	if (subbuf.head[0].iov_len) {
		len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
		csum = csum_partial(subbuf.head[0].iov_base, len, csum);
		remaining -= len;
	}

	/* Continue into page array */
	idx = subbuf.page_base / PAGE_SIZE;
	base = subbuf.page_base & ~PAGE_MASK;
	while (remaining) {
		p = page_address(subbuf.pages[idx]) + base;
		len = min_t(unsigned int, PAGE_SIZE - base, remaining);
		csum = csum_partial(p, len, csum);
		remaining -= len;
		base = 0;
		++idx;
	}
	return csum;
}

static int
nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
		   const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum) {
		nfsd_stats_payload_misses_inc(nn);
		trace_nfsd_drc_mismatch(nn, key, rp);
	}

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the matching entry if
 * one is found; otherwise inserts @key into the tree and returns it.
 */
static struct nfsd_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
		  struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp, *ret = key;
	struct rb_node **p = &b->rb_head.rb_node,
		       *parent = NULL;
	unsigned int entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct nfsd_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > nn->longest_chain) {
		nn->longest_chain = entries;
		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
	} else if (entries == nn->longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		nn->longest_chain_cachesize = min_t(unsigned int,
				nn->longest_chain_cachesize,
				atomic_read(&nn->num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}
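
/*
 * A note on the memcmp() in nfsd_cache_key_cmp(): comparing the whole
 * key structure byte-by-byte is safe only because nfsd_cacherep_alloc()
 * zeroes c_key with memset() before filling in its fields, so any
 * padding bytes inside the structure compare equal as well.
 */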

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 * @start: starting byte in @rqstp->rq_arg of the NFS Call header
 * @len: size of the NFS Call header, in bytes
 * @cacherep: OUT: DRC entry for this request
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a cache miss followed by an insert, an entry is
 * preallocated before the bucket lock is taken; if the insert then
 * finds that a matching entry already exists, the preallocated entry
 * is released.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
		      unsigned int len, struct nfsd_cacherep **cacherep)
{
	struct nfsd_net *nn;
	struct nfsd_cacherep *rp, *found;
	__wsum csum;
	struct nfsd_drc_bucket *b;
	int type = rqstp->rq_cachetype;
	LIST_HEAD(dispose);
	int rtn = RC_DOIT;

	if (type == RC_NOCACHE) {
		nfsd_stats_rc_nocache_inc();
		goto out;
	}

	csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	rp = nfsd_cacherep_alloc(rqstp, csum, nn);
	if (!rp)
		goto out;

	b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp)
		goto found_entry;
	*cacherep = rp;
	rp->c_state = RC_INPROG;
	nfsd_prune_bucket_locked(nn, b, 3, &dispose);
	spin_unlock(&b->cache_lock);

	nfsd_cacherep_dispose(&dispose);

	nfsd_stats_rc_misses_inc();
	atomic_inc(&nn->num_drc_entries);
	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
	goto out;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsd_reply_cache_free_locked(NULL, rp, nn);
	nfsd_stats_rc_hits_inc();
	rtn = RC_DROPIT;
	rp = found;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out_trace;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out_trace;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out_unlock; /* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
	}

out_trace:
	trace_nfsd_drc_found(nn, rqstp, rtn);
out_unlock:
	spin_unlock(&b->cache_lock);
out:
	return rtn;
}
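
/*
 * How a caller is expected to consume the return values, in outline
 * (a simplified sketch; the real call site is in nfsd_dispatch()):
 *
 *	switch (nfsd_cache_lookup(rqstp, start, len, &rp)) {
 *	case RC_DOIT:	// execute the procedure, then nfsd_cache_update()
 *		break;
 *	case RC_REPLY:	// cached reply is already encoded in rq_res
 *		break;
 *	case RC_DROPIT:	// duplicate of an in-progress call; drop it
 *		break;
 *	}
 */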

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @rp: IN: DRC entry for this request
 * @cachetype: which cache to update
 * @statp: pointer to Reply's NFS status code, or NULL
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
		       int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
	struct nfsd_drc_bucket *b;
	int len;
	size_t bufsize = 0;

	if (!rp)
		return;

	b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	spin_lock(&b->cache_lock);
	nfsd_stats_drc_mem_usage_add(nn, bufsize);
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
}

static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	__be32 *p;

	p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
	if (unlikely(!p))
		return false;
	memcpy(p, data->iov_base, data->iov_len);
	xdr_commit_encode(&rqstp->rq_res_stream);
	return true;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
					  nfsd_net_id);

	seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
	seq_printf(m, "num entries: %u\n",
		   atomic_read(&nn->num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
	seq_printf(m, "mem usage: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
	seq_printf(m, "cache hits: %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
	seq_printf(m, "cache misses: %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
	seq_printf(m, "not cached: %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
	seq_printf(m, "payload misses: %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
	seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
	return 0;
}
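
/*
 * Illustrative output, based on the format strings above (the values
 * are made up; the file is typically read through nfsd's procfs stats
 * entry, e.g. /proc/fs/nfsd/reply_cache_stats):
 *
 *	max entries: 32768
 *	num entries: 1024
 *	hash buckets: 512
 *	mem usage: 212992
 *	cache hits: 394
 *	cache misses: 100437
 *	not cached: 1267
 *	payload misses: 0
 *	longest chain len: 3
 *	cachesize at longest: 714
 */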