xref: /linux/fs/nfs/pnfs_dev.c (revision c8faf11cd192214e231626c3ee973a35d8fc33f2)
/*
 *  Device operations for the pnfs client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *  Garth Goodson   <Garth.Goodson@netapp.com>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)


static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif

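/*
 * Hash a deviceid into one of the NFS4_DEVICE_ID_HASH_SIZE cache buckets:
 * a simple multiply-by-37 rolling hash over the raw NFS4_DEVICEID4_SIZE
 * bytes, masked down to a bucket index.
 */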
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

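/*
 * Walk one hash bucket looking for a deviceid that matches the given
 * layout driver, nfs_client and deviceid.  Entries whose reference count
 * has already dropped to zero are being torn down and are skipped.
 * The caller must hold rcu_read_lock().
 */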
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

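/*
 * Issue a GETDEVICEINFO for @dev_id and have the layout driver decode the
 * reply into a freshly allocated deviceid node.  Returns the new node, or
 * NULL if the allocation, the RPC or the decode fails.
 */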
static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		const struct cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Look up a deviceid in the cache and take a reference on it if found.
 *
 * @server nfs_server associated with the deviceid
 * @id deviceid to look up
 * @hash precomputed hash of the deviceid
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

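/*
 * Find-or-create entry point used by the layout drivers: look the deviceid
 * up in the cache first, otherwise fetch it from the server with
 * GETDEVICEINFO and insert the new node, unless another thread beat us to
 * it while no lock was held.  Returns a referenced node, or NULL.
 */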
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		goto found;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new) {
		trace_nfs4_find_deviceid(server, id, -ENOENT);
		return new;
	}

	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
	} else {
		atomic_inc(&new->ref);
		hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
		spin_unlock(&nfs4_deviceid_lock);
		d = new;
	}
found:
	trace_nfs4_find_deviceid(server, id, 0);
	return d;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
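
/*
 * Illustrative sketch, not part of the original file: roughly how a layout
 * driver might resolve a deviceid referenced by a layout segment.  The
 * function name and the error handling around it are hypothetical; only the
 * nfs4_* calls are the real APIs defined in this file.
 */
#if 0
static struct nfs4_deviceid_node *
example_resolve_deviceid(struct nfs_server *server,
			 const struct nfs4_deviceid *dev_id,
			 const struct cred *cred)
{
	struct nfs4_deviceid_node *node;

	/* Hits the RCU cache first, falling back to GETDEVICEINFO. */
	node = nfs4_find_get_deviceid(server, dev_id, cred, GFP_KERNEL);
	if (!node)
		return NULL;

	/* Don't hand out a device that is inside its retry timeout. */
	if (nfs4_test_deviceid_unavailable(node)) {
		nfs4_put_deviceid_node(node);	/* drop the lookup reference */
		return NULL;
	}
	return node;
}
#endif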

/*
 * Remove a deviceid from the cache
 *
 * @ld layout driver that owns the deviceid
 * @clp nfs_client associated with the deviceid
 * @id the deviceid to unhash
 *
 * If the deviceid is found, it is unhashed and the reference taken when it
 * was inserted into the cache is dropped.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* drop the reference held by the device id cache */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

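/*
 * Initialize a deviceid node freshly allocated by a layout driver's
 * ->alloc_deviceid_node() method.  The node starts with a reference count
 * of one, owned by the caller; the cache takes its own reference when the
 * node is hashed in nfs4_find_get_deviceid().
 */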
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	trace_nfs4_deviceid_free(d->nfs_client, &d->deviceid);
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

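/*
 * Clear the NFS_DEVICEID_UNAVAILABLE flag so the device can be used for
 * I/O again.
 */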
void
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);

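/*
 * Mark the device as unavailable and record when that happened, so that
 * nfs4_test_deviceid_unavailable() can re-enable it once the retry timeout
 * has expired.
 */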
void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	smp_mb__before_atomic();
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

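/*
 * Return true if the device is still within PNFS_DEVICE_RETRY_TIMEOUT of
 * being marked unavailable.  Once the timeout has passed, clear the flag
 * and report the device as usable again.
 */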
bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);

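/*
 * Unhash every deviceid in one hash bucket that belongs to @clp, collecting
 * the nodes on a temporary list so the cache references can be dropped
 * after the locks are released.
 */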
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}

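/*
 * Purge all cached deviceids that belong to @clp.  This is a no-op unless
 * the client actually negotiated pNFS metadata server support at
 * EXCHANGE_ID time.
 */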
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}