Lines matching +full:max +full:-len (fs/nfsd/nfscache.c)
// SPDX-License-Identifier: GPL-2.0

/* ... may change in the future and be a per-client cache. */

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size ...
 */

/*
 * ... In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used, in kB.
 *
 * XXX: these limits are per-container, so memory used will increase in
 * proportion to the number of containers.
 */
/* in nfsd_cache_size_limit(): */
unsigned long low_pages = totalram_pages() - totalhigh_pages();

limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT - 10);
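/*
 * A minimal sketch of how the two fragments above likely assemble; the
 * 256k-entry hard cap is an assumption drawn from the sizing comment,
 * not quoted from the matched lines.
 */
unsigned int nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	/* Scale the cache with sqrt(low memory), expressed in kB units. */
	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT - 10);
	return min_t(unsigned int, limit, 256 * 1024);
}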
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to the next power of two.
 */
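/*
 * A sketch of the computation this comment describes; TARGET_BUCKET_SIZE
 * is assumed to be a constant defined earlier in the file (it does not
 * appear among the matched lines).
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}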
/* in nfsd_cacherep_alloc(): */
rp->c_state = RC_UNUSED;
rp->c_type = RC_NOCACHE;
RB_CLEAR_NODE(&rp->c_node);
INIT_LIST_HEAD(&rp->c_lru);

memset(&rp->c_key, 0, sizeof(rp->c_key));
rp->c_key.k_xid = rqstp->rq_xid;
rp->c_key.k_proc = rqstp->rq_proc;
rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
rp->c_key.k_prot = rqstp->rq_prot;
rp->c_key.k_vers = rqstp->rq_vers;
rp->c_key.k_len = rqstp->rq_arg.len;
rp->c_key.k_csum = csum;
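/*
 * Together these fields form the DRC lookup key: the XID plus enough of
 * the client's identity (address, port, transport protocol, NFS version,
 * procedure, argument length, and payload checksum) to distinguish a
 * genuine retransmission from an XID collision.
 */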
/* in nfsd_cacherep_free(): */
if (rp->c_type == RC_REPLBUFF)
	kfree(rp->c_replvec.iov_base);

/* in nfsd_cacherep_dispose(): */
list_del(&rp->c_lru);
/* in nfsd_cacherep_unlink_locked(): */
if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
	nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
if (rp->c_state != RC_UNUSED) {
	rb_erase(&rp->c_node, &b->rb_head);
	list_del(&rp->c_lru);
	atomic_dec(&nn->num_drc_entries);
}
/* in nfsd_reply_cache_free(): */
spin_lock(&b->cache_lock);
/* ... */
spin_unlock(&b->cache_lock);

/* in nfsd_drc_slab_create(): */
return drc_slab ? 0 : -ENOMEM;
/* in nfsd_reply_cache_init(): */
nn->max_drc_entries = nfsd_cache_size_limit();
atomic_set(&nn->num_drc_entries, 0);
hashsize = nfsd_hashsize(nn->max_drc_entries);
nn->maskbits = ilog2(hashsize);

nn->drc_hashtbl = kvzalloc(array_size(hashsize,
				      sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
if (!nn->drc_hashtbl)
	return -ENOMEM;

nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
					       nn->nfsd_name);
if (!nn->nfsd_reply_cache_shrinker)
	goto out_shrinker;

nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
nn->nfsd_reply_cache_shrinker->seeks = 1;
nn->nfsd_reply_cache_shrinker->private_data = nn;

shrinker_register(nn->nfsd_reply_cache_shrinker);

for (i = 0; i < hashsize; i++) {
	INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
	spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
}
nn->drc_hashsize = hashsize;
return 0;

out_shrinker:
kvfree(nn->drc_hashtbl);
return -ENOMEM;
/* in nfsd_reply_cache_shutdown(): */
shrinker_free(nn->nfsd_reply_cache_shrinker);

for (i = 0; i < nn->drc_hashsize; i++) {
	struct list_head *head = &nn->drc_hashtbl[i].lru_head;

	while (!list_empty(head)) {
		rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
		nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i], rp, nn);
	}
}

kvfree(nn->drc_hashtbl);
nn->drc_hashtbl = NULL;
nn->drc_hashsize = 0;
/* in lru_put_end(): */
rp->c_timestamp = jiffies;
list_move_tail(&rp->c_lru, &b->lru_head);

/* in nfsd_cache_bucket_find(): */
unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

return &nn->drc_hashtbl[hash];
/*
 * Remove and return no more than @max expired entries in bucket @b.
 * If @max is zero, do not limit the number of removed entries.
 */
/* in nfsd_prune_bucket_locked(nn, b, max, dispose): */
unsigned long expiry = jiffies - RC_EXPIRE;

lockdep_assert_held(&b->cache_lock);

/* The bucket LRU is ordered oldest-first. */
list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
	/*
	 * Don't free entries attached to calls that are still
	 * in-progress, but do keep scanning the list.
	 */
	if (rp->c_state == RC_INPROG)
		continue;

	if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
	    time_before(expiry, rp->c_timestamp))
		break;

	nfsd_cacherep_unlink_locked(nn, b, rp);
	list_add(&rp->c_lru, dispose);

	if (max && ++freed > max)
		break;
}
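/*
 * Hedged usage sketch (not among the matched lines): callers gather
 * expired entries onto a local list under the bucket lock, then free
 * them outside the lock via nfsd_cacherep_dispose() seen earlier in
 * this listing. The batch size of 3 is illustrative only.
 */
LIST_HEAD(dispose);

spin_lock(&b->cache_lock);
nfsd_prune_bucket_locked(nn, b, 3, &dispose);
spin_unlock(&b->cache_lock);
nfsd_cacherep_dispose(&dispose);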
/*
 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
 * ...
 */
/* in nfsd_reply_cache_count(): */
struct nfsd_net *nn = shrink->private_data;

return atomic_read(&nn->num_drc_entries);
/*
 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
 * ...
 */
/* in nfsd_reply_cache_scan(): */
struct nfsd_net *nn = shrink->private_data;

for (i = 0; i < nn->drc_hashsize; i++) {
	struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

	if (list_empty(&b->lru_head))
		continue;

	spin_lock(&b->cache_lock);
	nfsd_prune_bucket_locked(nn, b, 0, &dispose);
	spin_unlock(&b->cache_lock);

	freed += nfsd_cacherep_dispose(&dispose);
	if (freed > sc->nr_to_scan)
		break;
}
/*
 * nfsd_cache_csum - Checksum incoming NFS Call arguments
 * ...
 * Returns a 32-bit checksum value, as defined in RFC 793.
 */
/* in nfsd_cache_csum(): */
unsigned int base, len;

len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
csum = csum_partial(subbuf.head[0].iov_base, len, csum);
remaining -= len;

len = min_t(unsigned int, PAGE_SIZE - base, remaining);
csum = csum_partial(p, len, csum);
remaining -= len;
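/*
 * Sketch of the page-walk that surrounds the last three lines above;
 * the idx/base bookkeeping is inferred from the fragments rather than
 * quoted from the matched lines.
 */
idx = subbuf.page_base / PAGE_SIZE;
base = subbuf.page_base & ~PAGE_MASK;
while (remaining) {
	p = page_address(subbuf.pages[idx]) + base;
	len = min_t(unsigned int, PAGE_SIZE - base, remaining);
	csum = csum_partial(p, len, csum);
	remaining -= len;
	base = 0;
	++idx;
}
return csum;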
/* in nfsd_cache_key_cmp(): */
if (key->c_key.k_xid == rp->c_key.k_xid &&
    key->c_key.k_csum != rp->c_key.k_csum) {
	/* ... */
}

return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
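/*
 * A matching XID whose checksum differs indicates a retransmission
 * carrying a different payload; these events feed the "payload misses"
 * counter printed by nfsd_reply_cache_stats_show() at the end of this
 * listing.
 */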
/* in nfsd_cache_insert(): */
struct rb_node **p = &b->rb_head.rb_node,
		*parent = NULL;

p = &parent->rb_left;
p = &parent->rb_right;
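/*
 * Sketch of the rb-tree descent the rb_left/rb_right fragments above
 * belong to; the loop head and cmp handling are inferred from the
 * fragments, not quoted from the matched lines.
 */
while (*p != NULL) {
	++entries;
	parent = *p;
	rp = rb_entry(parent, struct nfsd_cacherep, c_node);
	cmp = nfsd_cache_key_cmp(key, rp, nn);
	if (cmp < 0)
		p = &parent->rb_left;
	else if (cmp > 0)
		p = &parent->rb_right;
	else
		break;	/* an entry for this key already exists */
}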
rb_link_node(&key->c_node, parent, p);
rb_insert_color(&key->c_node, &b->rb_head);

if (entries > nn->longest_chain) {
	nn->longest_chain = entries;
	nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
} else if (entries == nn->longest_chain) {
	nn->longest_chain_cachesize = min_t(unsigned int,
					    nn->longest_chain_cachesize,
					    atomic_read(&nn->num_drc_entries));
}
/*
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @start: starting byte in @rqstp->rq_arg of the NFS Call header
 * @len: size of the NFS Call header, in bytes
 * ...
 */
/* in nfsd_cache_lookup(rqstp, start, len, cacherep): */
int type = rqstp->rq_cachetype;

csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);

b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
spin_lock(&b->cache_lock);

rp->c_state = RC_INPROG;

spin_unlock(&b->cache_lock);

atomic_inc(&nn->num_drc_entries);
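/*
 * The lines above are the cache-miss path: the new entry is published
 * as RC_INPROG so a duplicate arriving meanwhile can be detected. The
 * lines below handle a cache hit.
 */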
if (rp->c_state == RC_INPROG)
	goto out_trace;
if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
	goto out_trace;

switch (rp->c_type) {
case RC_REPLSTAT:
	xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
	break;
case RC_REPLBUFF:
	if (!nfsd_cache_append(rqstp, &rp->c_replvec))
		goto out_unlock;
	break;
default:
	WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
}

spin_unlock(&b->cache_lock);
/*
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * ...
 * This is called when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 */
/* in nfsd_cache_update(rqstp, rp, cachetype, statp): */
struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
int len;

b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
len >>= 2;
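/* len now counts 4-byte XDR words rather than bytes */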
/* Don't cache excessive amounts of data and XDR failures */
if (!statp || len > (256 >> 2)) {
	nfsd_reply_cache_free(b, rp, nn);
	return;
}

switch (cachetype) {
case RC_REPLSTAT:
	if (len != 1)
		printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
	rp->c_replstat = *statp;
	break;
case RC_REPLBUFF:
	cachv = &rp->c_replvec;
	bufsize = len << 2;
	cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
	if (!cachv->iov_base) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	cachv->iov_len = bufsize;
	memcpy(cachv->iov_base, statp, bufsize);
	break;
}

spin_lock(&b->cache_lock);
rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
rp->c_type = cachetype;
rp->c_state = RC_DONE;
spin_unlock(&b->cache_lock);
/* in nfsd_cache_append(): */
p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
if (unlikely(!p))
	return false;
memcpy(p, data->iov_base, data->iov_len);
xdr_commit_encode(&rqstp->rq_res_stream);
return true;
/* in nfsd_reply_cache_stats_show(): */
struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
				  nfsd_net_id);

seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
seq_printf(m, "num entries: %u\n",
	   atomic_read(&nn->num_drc_entries));
seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
seq_printf(m, "mem usage: %lld\n",
	   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE]));
seq_printf(m, "cache hits: %lld\n",
	   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]));
seq_printf(m, "cache misses: %lld\n",
	   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]));
seq_printf(m, "not cached: %lld\n",
	   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]));
seq_printf(m, "payload misses: %lld\n",
	   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]));
seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
return 0;