/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define	 RPCDBG_FACILITY RPCDBG_CACHE

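/* Forward declarations for the request-deferral helpers that are
 * defined later in this file.
 */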
static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
	struct hlist_head *head;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	hlist_for_each_entry(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might get lost if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				hlist_del_init(&tmp->cache_list);
				detail->entries --;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}

	hlist_add_head(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry,
			       struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

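/* Update the existing entry 'old' with the contents of 'new'.  If 'old'
 * is not yet VALID it is updated in place; otherwise a replacement entry
 * is allocated and hashed in while 'old' is expired.  The caller's
 * reference on 'old' is always consumed: it is either handed back (for an
 * in-place update) or dropped once the replacement is installed.  Returns
 * a referenced entry, or NULL (after putting 'old') if allocation fails.
 */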
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

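/* Used by cache_check() when an upcall cannot be made: mark the entry
 * NEGATIVE and freshen it, but only if it is still invalid.  Returns the
 * resulting cache_is_valid() status.
 */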
static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued or
 *	           upcall completed but item is still invalid (implying that
 *	           the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC: Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *            -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			hlist_del_init(&ch->cache_list);
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index ++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		queue_delayed_work(system_power_efficient_wq,
				   &cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	struct hlist_node *tmp = NULL;
	int i = 0;

	write_lock(&detail->hash_lock);
	if (!detail->entries) {
		write_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			hlist_del_init(&ch->cache_list);
			detail->entries--;

			set_bit(CACHE_CLEANED, &ch->flags);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(ch, detail);
			cache_put(ch, detail);
			write_lock(&detail->hash_lock);
		}
	}
	write_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);

}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

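/* Synchronously wait, for up to req->thread_wait, for the pending upcall
 * on 'item' to complete.  A temporary deferred request is hashed (but not
 * counted against DFR_MAX) so that cache_revisit_request() can wake this
 * thread through cache_restart_thread().
 */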
static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;


	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			       struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}

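/* Write-side ("downcall") helpers: copy an update from user space into a
 * kernel buffer, NUL-terminate it and hand it to the cache's ->cache_parse()
 * method.  cache_downcall() uses a page from the file's page cache when the
 * message fits in a single page; otherwise cache_slow_downcall() falls back
 * to a static buffer serialised by queue_io_mutex.
 */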
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq= &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq= &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq= &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}


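/* Remove any queued, unread upcall requests for 'ch' from the channel
 * queue and free them.  Requests that a reader is part-way through, or
 * entries for which CACHE_PENDING has been set again, are left alone.
 */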
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 *
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0) return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len<1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

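/* Warn (via the cache's ->warn_no_listener method, if any) that an upcall
 * was attempted while no user-space daemon has the channel open.  The
 * warning is emitted at most once per channel close.
 */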
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{

	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

Bruce Fields if (!cache_listeners_exist(detail)) { 11791da177e4SLinus Torvalds warn_no_listener(detail); 11801da177e4SLinus Torvalds return -EINVAL; 11811da177e4SLinus Torvalds } 1182013920ebSNeilBrown if (test_bit(CACHE_CLEANED, &h->flags)) 1183013920ebSNeilBrown /* Too late to make an upcall */ 1184013920ebSNeilBrown return -EAGAIN; 11851da177e4SLinus Torvalds 11861da177e4SLinus Torvalds buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 11871da177e4SLinus Torvalds if (!buf) 11881da177e4SLinus Torvalds return -EAGAIN; 11891da177e4SLinus Torvalds 11901da177e4SLinus Torvalds crq = kmalloc(sizeof (*crq), GFP_KERNEL); 11911da177e4SLinus Torvalds if (!crq) { 11921da177e4SLinus Torvalds kfree(buf); 11931da177e4SLinus Torvalds return -EAGAIN; 11941da177e4SLinus Torvalds } 11951da177e4SLinus Torvalds 11961da177e4SLinus Torvalds crq->q.reader = 0; 11971da177e4SLinus Torvalds crq->buf = buf; 1198d94af6deSStanislav Kinsbursky crq->len = 0; 11991da177e4SLinus Torvalds crq->readers = 0; 12001da177e4SLinus Torvalds spin_lock(&queue_lock); 1201a6ab1e81SNeilBrown if (test_bit(CACHE_PENDING, &h->flags)) { 1202a6ab1e81SNeilBrown crq->item = cache_get(h); 12031da177e4SLinus Torvalds list_add_tail(&crq->q.list, &detail->queue); 1204a6ab1e81SNeilBrown } else 1205f9e1aedcSNeilBrown /* Lost a race, no longer PENDING, so don't enqueue */ 1206f9e1aedcSNeilBrown ret = -EAGAIN; 12071da177e4SLinus Torvalds spin_unlock(&queue_lock); 12081da177e4SLinus Torvalds wake_up(&queue_wait); 1209f9e1aedcSNeilBrown if (ret == -EAGAIN) { 1210f9e1aedcSNeilBrown kfree(buf); 1211f9e1aedcSNeilBrown kfree(crq); 1212f9e1aedcSNeilBrown } 1213f9e1aedcSNeilBrown return ret; 12141da177e4SLinus Torvalds } 1215bc74b4f5STrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall); 12161da177e4SLinus Torvalds 12171da177e4SLinus Torvalds /* 12181da177e4SLinus Torvalds * parse a message from user-space and pass it 12191da177e4SLinus Torvalds * to an appropriate cache 12201da177e4SLinus Torvalds * Messages are, like requests, separated into fields by 12211da177e4SLinus Torvalds * spaces and dequotes as \xHEXSTRING or embedded \nnn octal 12221da177e4SLinus Torvalds * 12231da177e4SLinus Torvalds * Message is 12241da177e4SLinus Torvalds * reply cachename expiry key ... content.... 
12251da177e4SLinus Torvalds * 12261da177e4SLinus Torvalds * key and content are both parsed by cache 12271da177e4SLinus Torvalds */ 12281da177e4SLinus Torvalds 12291da177e4SLinus Torvalds int qword_get(char **bpp, char *dest, int bufsize) 12301da177e4SLinus Torvalds { 12311da177e4SLinus Torvalds /* return bytes copied, or -1 on error */ 12321da177e4SLinus Torvalds char *bp = *bpp; 12331da177e4SLinus Torvalds int len = 0; 12341da177e4SLinus Torvalds 12351da177e4SLinus Torvalds while (*bp == ' ') bp++; 12361da177e4SLinus Torvalds 12371da177e4SLinus Torvalds if (bp[0] == '\\' && bp[1] == 'x') { 12381da177e4SLinus Torvalds /* HEX STRING */ 12391da177e4SLinus Torvalds bp += 2; 1240b7052cd7SStefan Hajnoczi while (len < bufsize - 1) { 1241e7f483eaSAndy Shevchenko int h, l; 1242e7f483eaSAndy Shevchenko 1243e7f483eaSAndy Shevchenko h = hex_to_bin(bp[0]); 1244e7f483eaSAndy Shevchenko if (h < 0) 1245e7f483eaSAndy Shevchenko break; 1246e7f483eaSAndy Shevchenko 1247e7f483eaSAndy Shevchenko l = hex_to_bin(bp[1]); 1248e7f483eaSAndy Shevchenko if (l < 0) 1249e7f483eaSAndy Shevchenko break; 1250e7f483eaSAndy Shevchenko 1251e7f483eaSAndy Shevchenko *dest++ = (h << 4) | l; 1252e7f483eaSAndy Shevchenko bp += 2; 12531da177e4SLinus Torvalds len++; 12541da177e4SLinus Torvalds } 12551da177e4SLinus Torvalds } else { 12561da177e4SLinus Torvalds /* text with \nnn octal quoting */ 12571da177e4SLinus Torvalds while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) { 12581da177e4SLinus Torvalds if (*bp == '\\' && 12591da177e4SLinus Torvalds isodigit(bp[1]) && (bp[1] <= '3') && 12601da177e4SLinus Torvalds isodigit(bp[2]) && 12611da177e4SLinus Torvalds isodigit(bp[3])) { 12621da177e4SLinus Torvalds int byte = (*++bp -'0'); 12631da177e4SLinus Torvalds bp++; 12641da177e4SLinus Torvalds byte = (byte << 3) | (*bp++ - '0'); 12651da177e4SLinus Torvalds byte = (byte << 3) | (*bp++ - '0'); 12661da177e4SLinus Torvalds *dest++ = byte; 12671da177e4SLinus Torvalds len++; 12681da177e4SLinus Torvalds } else { 12691da177e4SLinus Torvalds *dest++ = *bp++; 12701da177e4SLinus Torvalds len++; 12711da177e4SLinus Torvalds } 12721da177e4SLinus Torvalds } 12731da177e4SLinus Torvalds } 12741da177e4SLinus Torvalds 12751da177e4SLinus Torvalds if (*bp != ' ' && *bp != '\n' && *bp != '\0') 12761da177e4SLinus Torvalds return -1; 12771da177e4SLinus Torvalds while (*bp == ' ') bp++; 12781da177e4SLinus Torvalds *bpp = bp; 12791da177e4SLinus Torvalds *dest = '\0'; 12801da177e4SLinus Torvalds return len; 12811da177e4SLinus Torvalds } 128224c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(qword_get); 12831da177e4SLinus Torvalds 12841da177e4SLinus Torvalds 12851da177e4SLinus Torvalds /* 12866489a8f4SKinglong Mee * support /proc/net/rpc/$CACHENAME/content 12871da177e4SLinus Torvalds * as a seqfile. 
12881da177e4SLinus Torvalds * We call ->cache_show passing NULL for the item to 12891da177e4SLinus Torvalds * get a header, then pass each real item in the cache 12901da177e4SLinus Torvalds */ 12911da177e4SLinus Torvalds 1292c8c081b7SKinglong Mee void *cache_seq_start(struct seq_file *m, loff_t *pos) 12939a429c49SEric Dumazet __acquires(cd->hash_lock) 12941da177e4SLinus Torvalds { 12951da177e4SLinus Torvalds loff_t n = *pos; 129695c96174SEric Dumazet unsigned int hash, entry; 12971da177e4SLinus Torvalds struct cache_head *ch; 12989936f2aeSKinglong Mee struct cache_detail *cd = m->private; 12991da177e4SLinus Torvalds 13001da177e4SLinus Torvalds read_lock(&cd->hash_lock); 13011da177e4SLinus Torvalds if (!n--) 13021da177e4SLinus Torvalds return SEQ_START_TOKEN; 13031da177e4SLinus Torvalds hash = n >> 32; 13041da177e4SLinus Torvalds entry = n & ((1LL<<32) - 1); 13051da177e4SLinus Torvalds 1306129e5824SKinglong Mee hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list) 13071da177e4SLinus Torvalds if (!entry--) 13081da177e4SLinus Torvalds return ch; 13091da177e4SLinus Torvalds n &= ~((1LL<<32) - 1); 13101da177e4SLinus Torvalds do { 13111da177e4SLinus Torvalds hash++; 13121da177e4SLinus Torvalds n += 1LL<<32; 13131da177e4SLinus Torvalds } while(hash < cd->hash_size && 1314129e5824SKinglong Mee hlist_empty(&cd->hash_table[hash])); 13151da177e4SLinus Torvalds if (hash >= cd->hash_size) 13161da177e4SLinus Torvalds return NULL; 13171da177e4SLinus Torvalds *pos = n+1; 1318129e5824SKinglong Mee return hlist_entry_safe(cd->hash_table[hash].first, 1319129e5824SKinglong Mee struct cache_head, cache_list); 13201da177e4SLinus Torvalds } 1321c8c081b7SKinglong Mee EXPORT_SYMBOL_GPL(cache_seq_start); 13221da177e4SLinus Torvalds 1323c8c081b7SKinglong Mee void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos) 13241da177e4SLinus Torvalds { 13251da177e4SLinus Torvalds struct cache_head *ch = p; 13261da177e4SLinus Torvalds int hash = (*pos >> 32); 13279936f2aeSKinglong Mee struct cache_detail *cd = m->private; 13281da177e4SLinus Torvalds 13291da177e4SLinus Torvalds if (p == SEQ_START_TOKEN) 13301da177e4SLinus Torvalds hash = 0; 1331129e5824SKinglong Mee else if (ch->cache_list.next == NULL) { 13321da177e4SLinus Torvalds hash++; 13331da177e4SLinus Torvalds *pos += 1LL<<32; 13341da177e4SLinus Torvalds } else { 13351da177e4SLinus Torvalds ++*pos; 1336129e5824SKinglong Mee return hlist_entry_safe(ch->cache_list.next, 1337129e5824SKinglong Mee struct cache_head, cache_list); 13381da177e4SLinus Torvalds } 13391da177e4SLinus Torvalds *pos &= ~((1LL<<32) - 1); 13401da177e4SLinus Torvalds while (hash < cd->hash_size && 1341129e5824SKinglong Mee hlist_empty(&cd->hash_table[hash])) { 13421da177e4SLinus Torvalds hash++; 13431da177e4SLinus Torvalds *pos += 1LL<<32; 13441da177e4SLinus Torvalds } 13451da177e4SLinus Torvalds if (hash >= cd->hash_size) 13461da177e4SLinus Torvalds return NULL; 13471da177e4SLinus Torvalds ++*pos; 1348129e5824SKinglong Mee return hlist_entry_safe(cd->hash_table[hash].first, 1349129e5824SKinglong Mee struct cache_head, cache_list); 13501da177e4SLinus Torvalds } 1351c8c081b7SKinglong Mee EXPORT_SYMBOL_GPL(cache_seq_next); 13521da177e4SLinus Torvalds 1353c8c081b7SKinglong Mee void cache_seq_stop(struct seq_file *m, void *p) 13549a429c49SEric Dumazet __releases(cd->hash_lock) 13551da177e4SLinus Torvalds { 13569936f2aeSKinglong Mee struct cache_detail *cd = m->private; 13571da177e4SLinus Torvalds read_unlock(&cd->hash_lock); 13581da177e4SLinus Torvalds } 1359c8c081b7SKinglong Mee 
EXPORT_SYMBOL_GPL(cache_seq_stop); 13601da177e4SLinus Torvalds 13611da177e4SLinus Torvalds static int c_show(struct seq_file *m, void *p) 13621da177e4SLinus Torvalds { 13631da177e4SLinus Torvalds struct cache_head *cp = p; 13649936f2aeSKinglong Mee struct cache_detail *cd = m->private; 13651da177e4SLinus Torvalds 13661da177e4SLinus Torvalds if (p == SEQ_START_TOKEN) 13671da177e4SLinus Torvalds return cd->cache_show(m, cd, NULL); 13681da177e4SLinus Torvalds 13691da177e4SLinus Torvalds ifdebug(CACHE) 13704013edeaSNeilBrown seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", 1371c5b29f88SNeilBrown convert_to_wallclock(cp->expiry_time), 13722c935bc5SPeter Zijlstra kref_read(&cp->ref), cp->flags); 13731da177e4SLinus Torvalds cache_get(cp); 13741da177e4SLinus Torvalds if (cache_check(cd, cp, NULL)) 13751da177e4SLinus Torvalds /* cache_check does a cache_put on failure */ 13761da177e4SLinus Torvalds seq_printf(m, "# "); 1377200724a7SNeilBrown else { 1378200724a7SNeilBrown if (cache_is_expired(cd, cp)) 1379200724a7SNeilBrown seq_printf(m, "# "); 13801da177e4SLinus Torvalds cache_put(cp, cd); 1381200724a7SNeilBrown } 13821da177e4SLinus Torvalds 13831da177e4SLinus Torvalds return cd->cache_show(m, cd, cp); 13841da177e4SLinus Torvalds } 13851da177e4SLinus Torvalds 138656b3d975SPhilippe De Muyter static const struct seq_operations cache_content_op = { 1387c8c081b7SKinglong Mee .start = cache_seq_start, 1388c8c081b7SKinglong Mee .next = cache_seq_next, 1389c8c081b7SKinglong Mee .stop = cache_seq_stop, 13901da177e4SLinus Torvalds .show = c_show, 13911da177e4SLinus Torvalds }; 13921da177e4SLinus Torvalds 1393173912a6STrond Myklebust static int content_open(struct inode *inode, struct file *file, 1394173912a6STrond Myklebust struct cache_detail *cd) 13951da177e4SLinus Torvalds { 13969936f2aeSKinglong Mee struct seq_file *seq; 13979936f2aeSKinglong Mee int err; 13981da177e4SLinus Torvalds 1399f7e86ab9STrond Myklebust if (!cd || !try_module_get(cd->owner)) 1400f7e86ab9STrond Myklebust return -EACCES; 14019936f2aeSKinglong Mee 14029936f2aeSKinglong Mee err = seq_open(file, &cache_content_op); 14039936f2aeSKinglong Mee if (err) { 1404a5990ea1SLi Zefan module_put(cd->owner); 14059936f2aeSKinglong Mee return err; 1406a5990ea1SLi Zefan } 14071da177e4SLinus Torvalds 14089936f2aeSKinglong Mee seq = file->private_data; 14099936f2aeSKinglong Mee seq->private = cd; 1410ec931035SPavel Emelyanov return 0; 14111da177e4SLinus Torvalds } 14121da177e4SLinus Torvalds 1413f7e86ab9STrond Myklebust static int content_release(struct inode *inode, struct file *file, 1414f7e86ab9STrond Myklebust struct cache_detail *cd) 1415f7e86ab9STrond Myklebust { 14169936f2aeSKinglong Mee int ret = seq_release(inode, file); 1417f7e86ab9STrond Myklebust module_put(cd->owner); 1418f7e86ab9STrond Myklebust return ret; 1419f7e86ab9STrond Myklebust } 1420f7e86ab9STrond Myklebust 1421f7e86ab9STrond Myklebust static int open_flush(struct inode *inode, struct file *file, 1422f7e86ab9STrond Myklebust struct cache_detail *cd) 1423f7e86ab9STrond Myklebust { 1424f7e86ab9STrond Myklebust if (!cd || !try_module_get(cd->owner)) 1425f7e86ab9STrond Myklebust return -EACCES; 1426f7e86ab9STrond Myklebust return nonseekable_open(inode, file); 1427f7e86ab9STrond Myklebust } 1428f7e86ab9STrond Myklebust 1429f7e86ab9STrond Myklebust static int release_flush(struct inode *inode, struct file *file, 1430f7e86ab9STrond Myklebust struct cache_detail *cd) 1431f7e86ab9STrond Myklebust { 1432f7e86ab9STrond Myklebust module_put(cd->owner); 1433f7e86ab9STrond Myklebust 
return 0; 1434f7e86ab9STrond Myklebust } 14351da177e4SLinus Torvalds 14361da177e4SLinus Torvalds static ssize_t read_flush(struct file *file, char __user *buf, 1437173912a6STrond Myklebust size_t count, loff_t *ppos, 1438173912a6STrond Myklebust struct cache_detail *cd) 14391da177e4SLinus Torvalds { 1440212ba906SSasha Levin char tbuf[22]; 144101b2969aSChuck Lever size_t len; 14421da177e4SLinus Torvalds 14438ccc8691SKinglong Mee len = snprintf(tbuf, sizeof(tbuf), "%lu\n", 14448ccc8691SKinglong Mee convert_to_wallclock(cd->flush_time)); 14458ccc8691SKinglong Mee return simple_read_from_buffer(buf, count, ppos, tbuf, len); 14461da177e4SLinus Torvalds } 14471da177e4SLinus Torvalds 14481da177e4SLinus Torvalds static ssize_t write_flush(struct file *file, const char __user *buf, 1449173912a6STrond Myklebust size_t count, loff_t *ppos, 1450173912a6STrond Myklebust struct cache_detail *cd) 14511da177e4SLinus Torvalds { 14521da177e4SLinus Torvalds char tbuf[20]; 1453c5b29f88SNeilBrown char *bp, *ep; 145477862036SNeil Brown time_t then, now; 1455c5b29f88SNeilBrown 14561da177e4SLinus Torvalds if (*ppos || count > sizeof(tbuf)-1) 14571da177e4SLinus Torvalds return -EINVAL; 14581da177e4SLinus Torvalds if (copy_from_user(tbuf, buf, count)) 14591da177e4SLinus Torvalds return -EFAULT; 14601da177e4SLinus Torvalds tbuf[count] = 0; 1461c5b29f88SNeilBrown simple_strtoul(tbuf, &ep, 0); 14621da177e4SLinus Torvalds if (*ep && *ep != '\n') 14631da177e4SLinus Torvalds return -EINVAL; 14641da177e4SLinus Torvalds 1465c5b29f88SNeilBrown bp = tbuf; 146677862036SNeil Brown then = get_expiry(&bp); 146777862036SNeil Brown now = seconds_since_boot(); 146877862036SNeil Brown cd->nextcheck = now; 146977862036SNeil Brown /* Can only set flush_time to 1 second beyond "now", or 147077862036SNeil Brown * possibly 1 second beyond flushtime. This is because 147177862036SNeil Brown * flush_time never goes backwards so it mustn't get too far 147277862036SNeil Brown * ahead of time. 
147377862036SNeil Brown */ 147477862036SNeil Brown if (then >= now) { 147577862036SNeil Brown /* Want to flush everything, so behave like cache_purge() */ 147677862036SNeil Brown if (cd->flush_time >= now) 147777862036SNeil Brown now = cd->flush_time + 1; 147877862036SNeil Brown then = now; 147977862036SNeil Brown } 148077862036SNeil Brown 148177862036SNeil Brown cd->flush_time = then; 14821da177e4SLinus Torvalds cache_flush(); 14831da177e4SLinus Torvalds 14841da177e4SLinus Torvalds *ppos += count; 14851da177e4SLinus Torvalds return count; 14861da177e4SLinus Torvalds } 14871da177e4SLinus Torvalds 1488173912a6STrond Myklebust static ssize_t cache_read_procfs(struct file *filp, char __user *buf, 1489173912a6STrond Myklebust size_t count, loff_t *ppos) 1490173912a6STrond Myklebust { 1491d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1492173912a6STrond Myklebust 1493173912a6STrond Myklebust return cache_read(filp, buf, count, ppos, cd); 1494173912a6STrond Myklebust } 1495173912a6STrond Myklebust 1496173912a6STrond Myklebust static ssize_t cache_write_procfs(struct file *filp, const char __user *buf, 1497173912a6STrond Myklebust size_t count, loff_t *ppos) 1498173912a6STrond Myklebust { 1499d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1500173912a6STrond Myklebust 1501173912a6STrond Myklebust return cache_write(filp, buf, count, ppos, cd); 1502173912a6STrond Myklebust } 1503173912a6STrond Myklebust 1504173912a6STrond Myklebust static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait) 1505173912a6STrond Myklebust { 1506d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1507173912a6STrond Myklebust 1508173912a6STrond Myklebust return cache_poll(filp, wait, cd); 1509173912a6STrond Myklebust } 1510173912a6STrond Myklebust 1511d79b6f4dSFrederic Weisbecker static long cache_ioctl_procfs(struct file *filp, 1512173912a6STrond Myklebust unsigned int cmd, unsigned long arg) 1513173912a6STrond Myklebust { 1514496ad9aaSAl Viro struct inode *inode = file_inode(filp); 1515d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(inode); 1516173912a6STrond Myklebust 1517a6f8dbc6SArnd Bergmann return cache_ioctl(inode, filp, cmd, arg, cd); 1518173912a6STrond Myklebust } 1519173912a6STrond Myklebust 1520173912a6STrond Myklebust static int cache_open_procfs(struct inode *inode, struct file *filp) 1521173912a6STrond Myklebust { 1522d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(inode); 1523173912a6STrond Myklebust 1524173912a6STrond Myklebust return cache_open(inode, filp, cd); 1525173912a6STrond Myklebust } 1526173912a6STrond Myklebust 1527173912a6STrond Myklebust static int cache_release_procfs(struct inode *inode, struct file *filp) 1528173912a6STrond Myklebust { 1529d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(inode); 1530173912a6STrond Myklebust 1531173912a6STrond Myklebust return cache_release(inode, filp, cd); 1532173912a6STrond Myklebust } 1533173912a6STrond Myklebust 1534173912a6STrond Myklebust static const struct file_operations cache_file_operations_procfs = { 1535173912a6STrond Myklebust .owner = THIS_MODULE, 1536173912a6STrond Myklebust .llseek = no_llseek, 1537173912a6STrond Myklebust .read = cache_read_procfs, 1538173912a6STrond Myklebust .write = cache_write_procfs, 1539173912a6STrond Myklebust .poll = cache_poll_procfs, 1540d79b6f4dSFrederic Weisbecker .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */ 1541173912a6STrond Myklebust .open = cache_open_procfs, 1542173912a6STrond Myklebust .release = 
cache_release_procfs, 15431da177e4SLinus Torvalds }; 1544173912a6STrond Myklebust 1545173912a6STrond Myklebust static int content_open_procfs(struct inode *inode, struct file *filp) 1546173912a6STrond Myklebust { 1547d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(inode); 1548173912a6STrond Myklebust 1549173912a6STrond Myklebust return content_open(inode, filp, cd); 1550173912a6STrond Myklebust } 1551173912a6STrond Myklebust 1552f7e86ab9STrond Myklebust static int content_release_procfs(struct inode *inode, struct file *filp) 1553f7e86ab9STrond Myklebust { 1554d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(inode); 1555f7e86ab9STrond Myklebust 1556f7e86ab9STrond Myklebust return content_release(inode, filp, cd); 1557f7e86ab9STrond Myklebust } 1558f7e86ab9STrond Myklebust 1559173912a6STrond Myklebust static const struct file_operations content_file_operations_procfs = { 1560173912a6STrond Myklebust .open = content_open_procfs, 1561173912a6STrond Myklebust .read = seq_read, 1562173912a6STrond Myklebust .llseek = seq_lseek, 1563f7e86ab9STrond Myklebust .release = content_release_procfs, 1564173912a6STrond Myklebust }; 1565173912a6STrond Myklebust 1566f7e86ab9STrond Myklebust static int open_flush_procfs(struct inode *inode, struct file *filp) 1567f7e86ab9STrond Myklebust { 1568d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(inode); 1569f7e86ab9STrond Myklebust 1570f7e86ab9STrond Myklebust return open_flush(inode, filp, cd); 1571f7e86ab9STrond Myklebust } 1572f7e86ab9STrond Myklebust 1573f7e86ab9STrond Myklebust static int release_flush_procfs(struct inode *inode, struct file *filp) 1574f7e86ab9STrond Myklebust { 1575d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(inode); 1576f7e86ab9STrond Myklebust 1577f7e86ab9STrond Myklebust return release_flush(inode, filp, cd); 1578f7e86ab9STrond Myklebust } 1579f7e86ab9STrond Myklebust 1580173912a6STrond Myklebust static ssize_t read_flush_procfs(struct file *filp, char __user *buf, 1581173912a6STrond Myklebust size_t count, loff_t *ppos) 1582173912a6STrond Myklebust { 1583d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1584173912a6STrond Myklebust 1585173912a6STrond Myklebust return read_flush(filp, buf, count, ppos, cd); 1586173912a6STrond Myklebust } 1587173912a6STrond Myklebust 1588173912a6STrond Myklebust static ssize_t write_flush_procfs(struct file *filp, 1589173912a6STrond Myklebust const char __user *buf, 1590173912a6STrond Myklebust size_t count, loff_t *ppos) 1591173912a6STrond Myklebust { 1592d9dda78bSAl Viro struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1593173912a6STrond Myklebust 1594173912a6STrond Myklebust return write_flush(filp, buf, count, ppos, cd); 1595173912a6STrond Myklebust } 1596173912a6STrond Myklebust 1597173912a6STrond Myklebust static const struct file_operations cache_flush_operations_procfs = { 1598f7e86ab9STrond Myklebust .open = open_flush_procfs, 1599173912a6STrond Myklebust .read = read_flush_procfs, 1600173912a6STrond Myklebust .write = write_flush_procfs, 1601f7e86ab9STrond Myklebust .release = release_flush_procfs, 16026038f373SArnd Bergmann .llseek = no_llseek, 1603173912a6STrond Myklebust }; 1604173912a6STrond Myklebust 1605863d7d9cSKinglong Mee static void remove_cache_proc_entries(struct cache_detail *cd) 1606173912a6STrond Myklebust { 1607863d7d9cSKinglong Mee if (cd->procfs) { 1608863d7d9cSKinglong Mee proc_remove(cd->procfs); 1609863d7d9cSKinglong Mee cd->procfs = NULL; 1610863d7d9cSKinglong Mee } 1611173912a6STrond Myklebust } 1612173912a6STrond Myklebust 
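For the reverse direction, a reply the daemon writes into the channel file (via cache_write_procfs above) is ultimately handed to the cache's ->cache_parse callback, which typically undoes the quoting with qword_get() and reads the expiry with get_expiry(). A hedged sketch follows; "demo_parse", the field order, and the buffer size are hypothetical, since each cache defines its own record layout.

/*
 * Illustrative sketch only (not part of cache.c): parsing one reply line.
 * Records must end with a newline; fields are unquoted by qword_get().
 */
static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
{
	char key[128];
	time_t expiry;
	char *bp = mesg;

	if (mlen <= 0 || mesg[mlen - 1] != '\n')
		return -EINVAL;			/* record must end in '\n' */
	mesg[mlen - 1] = '\0';

	if (qword_get(&bp, key, sizeof(key)) <= 0)
		return -EINVAL;			/* first field: the key    */

	expiry = get_expiry(&bp);
	if (expiry == 0)
		return -EINVAL;			/* missing/invalid expiry  */

	/* ... look up or update the matching cache_head from key/expiry ... */
	return 0;
}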
1613173912a6STrond Myklebust #ifdef CONFIG_PROC_FS 1614593ce16bSPavel Emelyanov static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) 1615173912a6STrond Myklebust { 1616173912a6STrond Myklebust struct proc_dir_entry *p; 16174f42d0d5SPavel Emelyanov struct sunrpc_net *sn; 1618173912a6STrond Myklebust 16194f42d0d5SPavel Emelyanov sn = net_generic(net, sunrpc_net_id); 1620863d7d9cSKinglong Mee cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc); 1621863d7d9cSKinglong Mee if (cd->procfs == NULL) 1622173912a6STrond Myklebust goto out_nomem; 1623173912a6STrond Myklebust 1624173912a6STrond Myklebust p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR, 1625863d7d9cSKinglong Mee cd->procfs, &cache_flush_operations_procfs, cd); 1626173912a6STrond Myklebust if (p == NULL) 1627173912a6STrond Myklebust goto out_nomem; 1628173912a6STrond Myklebust 16292d438338SStanislav Kinsbursky if (cd->cache_request || cd->cache_parse) { 1630173912a6STrond Myklebust p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR, 1631863d7d9cSKinglong Mee cd->procfs, &cache_file_operations_procfs, cd); 1632173912a6STrond Myklebust if (p == NULL) 1633173912a6STrond Myklebust goto out_nomem; 1634173912a6STrond Myklebust } 1635173912a6STrond Myklebust if (cd->cache_show) { 1636ec168676SYanchuan Nian p = proc_create_data("content", S_IFREG|S_IRUSR, 1637863d7d9cSKinglong Mee cd->procfs, &content_file_operations_procfs, cd); 1638173912a6STrond Myklebust if (p == NULL) 1639173912a6STrond Myklebust goto out_nomem; 1640173912a6STrond Myklebust } 1641173912a6STrond Myklebust return 0; 1642173912a6STrond Myklebust out_nomem: 1643863d7d9cSKinglong Mee remove_cache_proc_entries(cd); 1644173912a6STrond Myklebust return -ENOMEM; 1645173912a6STrond Myklebust } 1646173912a6STrond Myklebust #else /* CONFIG_PROC_FS */ 1647593ce16bSPavel Emelyanov static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) 1648173912a6STrond Myklebust { 1649173912a6STrond Myklebust return 0; 1650173912a6STrond Myklebust } 1651173912a6STrond Myklebust #endif 1652173912a6STrond Myklebust 16538eab945cSArtem Bityutskiy void __init cache_initialize(void) 16548eab945cSArtem Bityutskiy { 1655203b42f7STejun Heo INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean); 16568eab945cSArtem Bityutskiy } 16578eab945cSArtem Bityutskiy 1658593ce16bSPavel Emelyanov int cache_register_net(struct cache_detail *cd, struct net *net) 1659173912a6STrond Myklebust { 1660173912a6STrond Myklebust int ret; 1661173912a6STrond Myklebust 1662173912a6STrond Myklebust sunrpc_init_cache_detail(cd); 1663593ce16bSPavel Emelyanov ret = create_cache_proc_entries(cd, net); 1664173912a6STrond Myklebust if (ret) 1665173912a6STrond Myklebust sunrpc_destroy_cache_detail(cd); 1666173912a6STrond Myklebust return ret; 1667173912a6STrond Myklebust } 1668f5c8593bSStanislav Kinsbursky EXPORT_SYMBOL_GPL(cache_register_net); 1669593ce16bSPavel Emelyanov 1670593ce16bSPavel Emelyanov void cache_unregister_net(struct cache_detail *cd, struct net *net) 1671593ce16bSPavel Emelyanov { 1672863d7d9cSKinglong Mee remove_cache_proc_entries(cd); 1673593ce16bSPavel Emelyanov sunrpc_destroy_cache_detail(cd); 1674593ce16bSPavel Emelyanov } 1675f5c8593bSStanislav Kinsbursky EXPORT_SYMBOL_GPL(cache_unregister_net); 1676593ce16bSPavel Emelyanov 1677*d34971a6SBhumika Goyal struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net) 1678173912a6STrond Myklebust { 16790a402d5aSStanislav Kinsbursky struct cache_detail *cd; 1680129e5824SKinglong Mee int i; 
16810a402d5aSStanislav Kinsbursky 16820a402d5aSStanislav Kinsbursky cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL); 16830a402d5aSStanislav Kinsbursky if (cd == NULL) 16840a402d5aSStanislav Kinsbursky return ERR_PTR(-ENOMEM); 16850a402d5aSStanislav Kinsbursky 1686129e5824SKinglong Mee cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head), 16870a402d5aSStanislav Kinsbursky GFP_KERNEL); 16880a402d5aSStanislav Kinsbursky if (cd->hash_table == NULL) { 16890a402d5aSStanislav Kinsbursky kfree(cd); 16900a402d5aSStanislav Kinsbursky return ERR_PTR(-ENOMEM); 1691173912a6STrond Myklebust } 1692129e5824SKinglong Mee 1693129e5824SKinglong Mee for (i = 0; i < cd->hash_size; i++) 1694129e5824SKinglong Mee INIT_HLIST_HEAD(&cd->hash_table[i]); 16950a402d5aSStanislav Kinsbursky cd->net = net; 16960a402d5aSStanislav Kinsbursky return cd; 16970a402d5aSStanislav Kinsbursky } 16980a402d5aSStanislav Kinsbursky EXPORT_SYMBOL_GPL(cache_create_net); 16990a402d5aSStanislav Kinsbursky 17000a402d5aSStanislav Kinsbursky void cache_destroy_net(struct cache_detail *cd, struct net *net) 17010a402d5aSStanislav Kinsbursky { 17020a402d5aSStanislav Kinsbursky kfree(cd->hash_table); 17030a402d5aSStanislav Kinsbursky kfree(cd); 17040a402d5aSStanislav Kinsbursky } 17050a402d5aSStanislav Kinsbursky EXPORT_SYMBOL_GPL(cache_destroy_net); 17068854e82dSTrond Myklebust 17078854e82dSTrond Myklebust static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, 17088854e82dSTrond Myklebust size_t count, loff_t *ppos) 17098854e82dSTrond Myklebust { 1710496ad9aaSAl Viro struct cache_detail *cd = RPC_I(file_inode(filp))->private; 17118854e82dSTrond Myklebust 17128854e82dSTrond Myklebust return cache_read(filp, buf, count, ppos, cd); 17138854e82dSTrond Myklebust } 17148854e82dSTrond Myklebust 17158854e82dSTrond Myklebust static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf, 17168854e82dSTrond Myklebust size_t count, loff_t *ppos) 17178854e82dSTrond Myklebust { 1718496ad9aaSAl Viro struct cache_detail *cd = RPC_I(file_inode(filp))->private; 17198854e82dSTrond Myklebust 17208854e82dSTrond Myklebust return cache_write(filp, buf, count, ppos, cd); 17218854e82dSTrond Myklebust } 17228854e82dSTrond Myklebust 17238854e82dSTrond Myklebust static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait) 17248854e82dSTrond Myklebust { 1725496ad9aaSAl Viro struct cache_detail *cd = RPC_I(file_inode(filp))->private; 17268854e82dSTrond Myklebust 17278854e82dSTrond Myklebust return cache_poll(filp, wait, cd); 17288854e82dSTrond Myklebust } 17298854e82dSTrond Myklebust 17309918ff26SFrederic Weisbecker static long cache_ioctl_pipefs(struct file *filp, 17318854e82dSTrond Myklebust unsigned int cmd, unsigned long arg) 17328854e82dSTrond Myklebust { 1733496ad9aaSAl Viro struct inode *inode = file_inode(filp); 17348854e82dSTrond Myklebust struct cache_detail *cd = RPC_I(inode)->private; 17358854e82dSTrond Myklebust 1736a6f8dbc6SArnd Bergmann return cache_ioctl(inode, filp, cmd, arg, cd); 17378854e82dSTrond Myklebust } 17388854e82dSTrond Myklebust 17398854e82dSTrond Myklebust static int cache_open_pipefs(struct inode *inode, struct file *filp) 17408854e82dSTrond Myklebust { 17418854e82dSTrond Myklebust struct cache_detail *cd = RPC_I(inode)->private; 17428854e82dSTrond Myklebust 17438854e82dSTrond Myklebust return cache_open(inode, filp, cd); 17448854e82dSTrond Myklebust } 17458854e82dSTrond Myklebust 17468854e82dSTrond Myklebust static int cache_release_pipefs(struct inode *inode, struct file 
*filp) 17478854e82dSTrond Myklebust { 17488854e82dSTrond Myklebust struct cache_detail *cd = RPC_I(inode)->private; 17498854e82dSTrond Myklebust 17508854e82dSTrond Myklebust return cache_release(inode, filp, cd); 17518854e82dSTrond Myklebust } 17528854e82dSTrond Myklebust 17538854e82dSTrond Myklebust const struct file_operations cache_file_operations_pipefs = { 17548854e82dSTrond Myklebust .owner = THIS_MODULE, 17558854e82dSTrond Myklebust .llseek = no_llseek, 17568854e82dSTrond Myklebust .read = cache_read_pipefs, 17578854e82dSTrond Myklebust .write = cache_write_pipefs, 17588854e82dSTrond Myklebust .poll = cache_poll_pipefs, 17599918ff26SFrederic Weisbecker .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */ 17608854e82dSTrond Myklebust .open = cache_open_pipefs, 17618854e82dSTrond Myklebust .release = cache_release_pipefs, 17628854e82dSTrond Myklebust }; 17638854e82dSTrond Myklebust 17648854e82dSTrond Myklebust static int content_open_pipefs(struct inode *inode, struct file *filp) 17658854e82dSTrond Myklebust { 17668854e82dSTrond Myklebust struct cache_detail *cd = RPC_I(inode)->private; 17678854e82dSTrond Myklebust 17688854e82dSTrond Myklebust return content_open(inode, filp, cd); 17698854e82dSTrond Myklebust } 17708854e82dSTrond Myklebust 1771f7e86ab9STrond Myklebust static int content_release_pipefs(struct inode *inode, struct file *filp) 1772f7e86ab9STrond Myklebust { 1773f7e86ab9STrond Myklebust struct cache_detail *cd = RPC_I(inode)->private; 1774f7e86ab9STrond Myklebust 1775f7e86ab9STrond Myklebust return content_release(inode, filp, cd); 1776f7e86ab9STrond Myklebust } 1777f7e86ab9STrond Myklebust 17788854e82dSTrond Myklebust const struct file_operations content_file_operations_pipefs = { 17798854e82dSTrond Myklebust .open = content_open_pipefs, 17808854e82dSTrond Myklebust .read = seq_read, 17818854e82dSTrond Myklebust .llseek = seq_lseek, 1782f7e86ab9STrond Myklebust .release = content_release_pipefs, 17838854e82dSTrond Myklebust }; 17848854e82dSTrond Myklebust 1785f7e86ab9STrond Myklebust static int open_flush_pipefs(struct inode *inode, struct file *filp) 1786f7e86ab9STrond Myklebust { 1787f7e86ab9STrond Myklebust struct cache_detail *cd = RPC_I(inode)->private; 1788f7e86ab9STrond Myklebust 1789f7e86ab9STrond Myklebust return open_flush(inode, filp, cd); 1790f7e86ab9STrond Myklebust } 1791f7e86ab9STrond Myklebust 1792f7e86ab9STrond Myklebust static int release_flush_pipefs(struct inode *inode, struct file *filp) 1793f7e86ab9STrond Myklebust { 1794f7e86ab9STrond Myklebust struct cache_detail *cd = RPC_I(inode)->private; 1795f7e86ab9STrond Myklebust 1796f7e86ab9STrond Myklebust return release_flush(inode, filp, cd); 1797f7e86ab9STrond Myklebust } 1798f7e86ab9STrond Myklebust 17998854e82dSTrond Myklebust static ssize_t read_flush_pipefs(struct file *filp, char __user *buf, 18008854e82dSTrond Myklebust size_t count, loff_t *ppos) 18018854e82dSTrond Myklebust { 1802496ad9aaSAl Viro struct cache_detail *cd = RPC_I(file_inode(filp))->private; 18038854e82dSTrond Myklebust 18048854e82dSTrond Myklebust return read_flush(filp, buf, count, ppos, cd); 18058854e82dSTrond Myklebust } 18068854e82dSTrond Myklebust 18078854e82dSTrond Myklebust static ssize_t write_flush_pipefs(struct file *filp, 18088854e82dSTrond Myklebust const char __user *buf, 18098854e82dSTrond Myklebust size_t count, loff_t *ppos) 18108854e82dSTrond Myklebust { 1811496ad9aaSAl Viro struct cache_detail *cd = RPC_I(file_inode(filp))->private; 18128854e82dSTrond Myklebust 18138854e82dSTrond Myklebust return 
write_flush(filp, buf, count, ppos, cd); 18148854e82dSTrond Myklebust } 18158854e82dSTrond Myklebust 18168854e82dSTrond Myklebust const struct file_operations cache_flush_operations_pipefs = { 1817f7e86ab9STrond Myklebust .open = open_flush_pipefs, 18188854e82dSTrond Myklebust .read = read_flush_pipefs, 18198854e82dSTrond Myklebust .write = write_flush_pipefs, 1820f7e86ab9STrond Myklebust .release = release_flush_pipefs, 18216038f373SArnd Bergmann .llseek = no_llseek, 18228854e82dSTrond Myklebust }; 18238854e82dSTrond Myklebust 18248854e82dSTrond Myklebust int sunrpc_cache_register_pipefs(struct dentry *parent, 182564f1426fSAl Viro const char *name, umode_t umode, 18268854e82dSTrond Myklebust struct cache_detail *cd) 18278854e82dSTrond Myklebust { 1828a95e691fSAl Viro struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd); 1829a95e691fSAl Viro if (IS_ERR(dir)) 1830a95e691fSAl Viro return PTR_ERR(dir); 1831863d7d9cSKinglong Mee cd->pipefs = dir; 1832a95e691fSAl Viro return 0; 18338854e82dSTrond Myklebust } 18348854e82dSTrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs); 18358854e82dSTrond Myklebust 18368854e82dSTrond Myklebust void sunrpc_cache_unregister_pipefs(struct cache_detail *cd) 18378854e82dSTrond Myklebust { 1838863d7d9cSKinglong Mee if (cd->pipefs) { 1839863d7d9cSKinglong Mee rpc_remove_cache_dir(cd->pipefs); 1840863d7d9cSKinglong Mee cd->pipefs = NULL; 1841863d7d9cSKinglong Mee } 18428854e82dSTrond Myklebust } 18438854e82dSTrond Myklebust EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs); 18448854e82dSTrond Myklebust 18452b477c00SNeil Brown void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h) 18462b477c00SNeil Brown { 18472b477c00SNeil Brown write_lock(&cd->hash_lock); 18482b477c00SNeil Brown if (!hlist_unhashed(&h->cache_list)){ 18492b477c00SNeil Brown hlist_del_init(&h->cache_list); 18502b477c00SNeil Brown cd->entries--; 18512b477c00SNeil Brown write_unlock(&cd->hash_lock); 18522b477c00SNeil Brown cache_put(h, cd); 18532b477c00SNeil Brown } else 18542b477c00SNeil Brown write_unlock(&cd->hash_lock); 18552b477c00SNeil Brown } 18562b477c00SNeil Brown EXPORT_SYMBOL_GPL(sunrpc_cache_unhash); 1857
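The create/register/unregister/destroy helpers above are normally used together from a per-network-namespace init/exit pair. A sketch of that lifecycle follows; the template "demo_cache_template", the hook names, and the way the cache_detail pointer is stored per-net are hypothetical, but the call sequence mirrors the functions defined in this file.

/*
 * Illustrative sketch only (not part of cache.c): per-net cache lifecycle.
 */
static const struct cache_detail demo_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= 64,
	.name		= "demo",
	/* .cache_request, .cache_parse, .cache_show, ... as needed */
};

static int demo_cache_net_init(struct net *net)
{
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&demo_cache_template, net); /* kmemdup + hash table */
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	err = cache_register_net(cd, net);	/* init detail + proc entries */
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	/* stash cd in per-net data for later use ... */
	return 0;
}

static void demo_cache_net_exit(struct net *net, struct cache_detail *cd)
{
	cache_unregister_net(cd, net);		/* remove proc entries, flush */
	cache_destroy_net(cd, net);		/* free hash table and detail */
}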