/*
 * Device operations for the pnfs client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);

static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

/*
 * Look up a deviceid in the cache and take a reference on it if found.
 *
 * @clp nfs_client associated with deviceid
 * @id deviceid to look up
 */
struct nfs4_deviceid_node *
_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		   const struct nfs_client *clp, const struct nfs4_deviceid *id,
		   long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, hash);
	if (d && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}
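
/*
 * Usage sketch (illustrative only, not part of the original source).  The
 * lookup above is the usual RCU + refcount pattern: a node whose reference
 * count has already reached zero may still be visible to RCU readers until
 * the grace period ends, so only atomic_inc_not_zero() makes the reference
 * valid.  A caller in a layout driver would look roughly like the lines
 * below; the error value and the fallback to GETDEVICEINFO mentioned here
 * are assumptions, not taken from this file:
 *
 *	struct nfs4_deviceid_node *d;
 *
 *	d = nfs4_find_get_deviceid(ld, clp, id);
 *	if (!d)
 *		return -ENODEV;
 *	... use the device while the reference is held ...
 *	nfs4_put_deviceid_node(d);
 */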

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		       const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Unhash and put deviceid
 *
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * @ret the unhashed node if it was found and its reference count dropped to
 *	zero, NULL otherwise.
 */
struct nfs4_deviceid_node *
nfs4_unhash_put_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return NULL;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	synchronize_rcu();

	/* balance the initial ref set in nfs4_init_deviceid_node */
	if (atomic_dec_and_test(&d->ref))
		return d;

	return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_unhash_put_deviceid);

/*
 * Delete a deviceid from the cache
 *
 * @clp struct nfs_client qualifying the deviceid
 * @id deviceid to delete
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	d = nfs4_unhash_put_deviceid(ld, clp, id);
	if (!d)
		return;
	d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
			const struct pnfs_layoutdriver_type *ld,
			const struct nfs_client *nfs_client,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	d->ld = ld;
	d->nfs_client = nfs_client;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Insert a deviceid node into the cache, unless an entry with the same
 * deviceid, layout driver and nfs_client is already present.
 *
 * @new new deviceid node
 *	Note that the caller must set up the following members:
 *	  new->ld
 *	  new->nfs_client
 *	  new->deviceid
 *
 * @ret @new if no matching entry was found; otherwise the existing entry,
 *	with a reference taken on it.
 */
struct nfs4_deviceid_node *
nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
{
	struct nfs4_deviceid_node *d;
	long hash;

	spin_lock(&nfs4_deviceid_lock);
	hash = nfs4_deviceid_hash(&new->deviceid);
	d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		return d;
	}

	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	spin_unlock(&nfs4_deviceid_lock);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
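
/*
 * Usage sketch (illustrative only, not part of the original source).  A
 * layout driver typically embeds struct nfs4_deviceid_node in its own
 * per-device structure, initializes it with nfs4_init_deviceid_node(), and
 * lets nfs4_insert_deviceid_node() resolve the race with other threads
 * decoding the same device.  The structure name and allocation below are
 * hypothetical:
 *
 *	struct foo_deviceid_node {
 *		struct nfs4_deviceid_node node;
 *		... driver-private device state ...
 *	};
 *
 *	new = kzalloc(sizeof(*new), gfp_flags);
 *	if (!new)
 *		return NULL;
 *	nfs4_init_deviceid_node(&new->node, ld, clp, id);
 *	d = nfs4_insert_deviceid_node(&new->node);
 *	if (d != &new->node) {
 *		... another thread won the race: free the local copy and
 *		    use d, which is returned with a reference held ...
 *	}
 */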

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * @ret true iff the node was deleted
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (!atomic_dec_and_lock(&d->ref, &nfs4_deviceid_lock))
		return false;
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	synchronize_rcu();
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n, *next;
	HLIST_HEAD(tmp);

	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->node, &tmp);
		}
	rcu_read_unlock();

	if (hlist_empty(&tmp))
		return;

	synchronize_rcu();
	hlist_for_each_entry_safe(d, n, next, &tmp, node)
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	spin_lock(&nfs4_deviceid_lock);
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
	spin_unlock(&nfs4_deviceid_lock);
}
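
/*
 * Implementation sketch (illustrative only, not part of the original
 * source).  Every path that drops the last reference
 * (nfs4_put_deviceid_node, nfs4_delete_deviceid, _deviceid_purge_client)
 * ends in the layout driver's free_deviceid_node callback from struct
 * pnfs_layoutdriver_type, so a driver typically recovers its containing
 * structure with container_of().  The type name below is the hypothetical
 * one from the sketch above:
 *
 *	static void
 *	foo_free_deviceid_node(struct nfs4_deviceid_node *d)
 *	{
 *		struct foo_deviceid_node *fdev =
 *			container_of(d, struct foo_deviceid_node, node);
 *
 *		... release driver-private resources held by fdev ...
 *		kfree(fdev);
 *	}
 */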