/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

#define HASHSIZE		64

static struct hlist_head	*cache_hash;
static struct list_head		lru_head;
static struct kmem_cache	*drc_slab;
static unsigned int		num_drc_entries;
static unsigned int		max_drc_entries;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
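
/*
 * Worked example (illustrative values only): for xid 0xdeadbeef,
 * h = 0xdeadbeef ^ (0xdeadbeef >> 24) = 0xdeadbeef ^ 0xde = 0xdeadbe31,
 * and 0xdeadbe31 & (HASHSIZE-1) = 0x31, i.e. bucket 49. XORing the top
 * byte into the low bits keeps XIDs that differ only in their high byte
 * from all landing in the same bucket.
 */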
static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static int	nfsd_reply_cache_shrink(struct shrinker *shrink,
					struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.shrink	= nfsd_reply_cache_shrink,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
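
/*
 * Worked example (assuming 4k pages, i.e. PAGE_SHIFT == 12): with 1GB
 * of low memory, low_pages = 262144, int_sqrt(262144) = 512, and
 * (16 * 512) << 2 = 32768, matching the table above. The 256k hard cap
 * only comes into play at roughly 64GB of low memory and beyond.
 */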
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, rearm the job (or modify the pending one) to run
	 * again in RC_EXPIRE, since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned int num;

	spin_lock(&cache_lock);
	if (sc->nr_to_scan)
		prune_cache_entries();
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
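
/*
 * Note that this checksums the request, not the reply: the result is
 * compared in nfsd_cache_search() alongside the XID and other fields,
 * so a new request that happens to reuse a recently seen XID is not
 * mistaken for a retransmission.
 */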
/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp;
	struct hlist_head	*rh;
	__be32 xid = rqstp->rq_xid;
	u32 proto = rqstp->rq_prot,
	    vers = rqstp->rq_vers,
	    proc = rqstp->rq_proc;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, rh, c_hash) {
		if (xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum &&
		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
			return rp;
	}
	return NULL;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	csum = nfsd_cache_csum(rqstp);

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rp = nfsd_cache_search(rqstp, csum);
	if (rp)
		goto found_entry;

	/* Try to use the first entry on the LRU */
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto setup_entry;
		}
	}

	/* Drop the lock and allocate a new entry */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		return RC_DOIT;
	}
	spin_lock(&cache_lock);
	++num_drc_entries;

	/*
	 * Must search again just in case someone inserted one
	 * after we dropped the lock above.
	 */
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

setup_entry:
	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
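
/*
 * To summarize the return values above: RC_DOIT tells the caller to
 * execute the procedure (either the call is uncached, or a fresh entry
 * was set up in RC_INPROG state); RC_DROPIT tells it to silently drop
 * the request (a duplicate is already in progress, or the client is
 * retransmitting excessively); RC_REPLY means the cached reply has
 * already been copied into rqstp->rq_res and can be sent as-is.
 */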
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!rp)
		return;

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}
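
/*
 * For reference, the expected call pattern from the dispatcher looks
 * roughly like the following (a simplified sketch, not the actual
 * nfsd_dispatch() source):
 *
 *	switch (nfsd_cache_lookup(rqstp)) {
 *	case RC_DROPIT:
 *		return 0;	drop the request entirely
 *	case RC_REPLY:
 *		return 1;	send the cached reply in rq_res
 *	case RC_DOIT:
 *		break;		fall through and execute the procedure
 *	}
 *	decode arguments, run the procedure, encode the reply, then:
 *	nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp);
 *	return 1;
 */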