// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
 * Copyright (C) 2024 NeilBrown <neilb@suse.de>
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/nfslocalio.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <net/netns/generic.h>

#include "localio_trace.h"

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("NFS localio protocol bypass support");

static DEFINE_SPINLOCK(nfs_uuids_lock);

/*
 * Global list of nfs_uuid_t instances
 * that is protected by nfs_uuids_lock.
 */
static LIST_HEAD(nfs_uuids);
/*
 * Lock ordering:
 * 1: nfs_uuid->lock
 * 2: nfs_uuids_lock
 * 3: nfs_uuid->list_lock (aka nn->local_clients_lock)
 *
 * May skip locks in select cases, but never hold multiple
 * locks out of order.
 */
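/*
 * Illustrative nesting only (not a new call site): a path that needs the
 * first two locks must take them in the order above, as nfs_uuid_begin()
 * does below:
 *
 *	spin_lock(&nfs_uuid->lock);
 *	spin_lock(&nfs_uuids_lock);
 *	...
 *	spin_unlock(&nfs_uuids_lock);
 *	spin_unlock(&nfs_uuid->lock);
 */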

void nfs_uuid_init(nfs_uuid_t *nfs_uuid)
{
	RCU_INIT_POINTER(nfs_uuid->net, NULL);
	nfs_uuid->dom = NULL;
	nfs_uuid->list_lock = NULL;
	INIT_LIST_HEAD(&nfs_uuid->list);
	INIT_LIST_HEAD(&nfs_uuid->files);
	spin_lock_init(&nfs_uuid->lock);
	nfs_uuid->nfs3_localio_probe_count = 0;
}
EXPORT_SYMBOL_GPL(nfs_uuid_init);

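/*
 * Claim this nfs_uuid for a LOCALIO handshake: generate a fresh uuid and
 * publish it on the global nfs_uuids list so the server side can find it
 * via nfs_uuid_is_local().  Returns false if the nfs_uuid is already bound
 * to a net or a handshake is already in flight.
 */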
bool nfs_uuid_begin(nfs_uuid_t *nfs_uuid)
{
	spin_lock(&nfs_uuid->lock);
	if (rcu_access_pointer(nfs_uuid->net)) {
		/* This nfs_uuid is already in use */
		spin_unlock(&nfs_uuid->lock);
		return false;
	}

	spin_lock(&nfs_uuids_lock);
	if (!list_empty(&nfs_uuid->list)) {
		/* This nfs_uuid is already in use */
		spin_unlock(&nfs_uuids_lock);
		spin_unlock(&nfs_uuid->lock);
		return false;
	}
	list_add_tail(&nfs_uuid->list, &nfs_uuids);
	spin_unlock(&nfs_uuids_lock);

	uuid_gen(&nfs_uuid->uuid);
	spin_unlock(&nfs_uuid->lock);

	return true;
}
EXPORT_SYMBOL_GPL(nfs_uuid_begin);

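/*
 * End a handshake that did not result in a local binding: if
 * nfs_uuid_is_local() never recorded a server net, take the nfs_uuid
 * back off the global nfs_uuids list.
 */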
void nfs_uuid_end(nfs_uuid_t *nfs_uuid)
{
	if (!rcu_access_pointer(nfs_uuid->net)) {
		spin_lock(&nfs_uuid->lock);
		if (!rcu_access_pointer(nfs_uuid->net)) {
			/* Not local, remove from nfs_uuids */
			spin_lock(&nfs_uuids_lock);
			list_del_init(&nfs_uuid->list);
			spin_unlock(&nfs_uuids_lock);
		}
		spin_unlock(&nfs_uuid->lock);
	}
}
EXPORT_SYMBOL_GPL(nfs_uuid_end);

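/* Caller must hold nfs_uuids_lock */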
static nfs_uuid_t * nfs_uuid_lookup_locked(const uuid_t *uuid)
{
	nfs_uuid_t *nfs_uuid;

	list_for_each_entry(nfs_uuid, &nfs_uuids, list)
		if (uuid_equal(&nfs_uuid->uuid, uuid))
			return nfs_uuid;

	return NULL;
}

static struct module *nfsd_mod;

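/*
 * Called from the server side when it recognizes the uuid from a LOCALIO
 * handshake as one of its own: move the nfs_uuid from the global nfs_uuids
 * list onto the server's per-net @list and record the net, auth_domain and
 * module that back it.
 */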
void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list,
		       spinlock_t *list_lock, struct net *net,
		       struct auth_domain *dom, struct module *mod)
{
	nfs_uuid_t *nfs_uuid;

	spin_lock(&nfs_uuids_lock);
	nfs_uuid = nfs_uuid_lookup_locked(uuid);
	if (!nfs_uuid) {
		spin_unlock(&nfs_uuids_lock);
		return;
	}

	/*
	 * We don't hold a ref on the net, but instead put
	 * ourselves on @list (nn->local_clients) so the net
	 * pointer can be invalidated.
	 */
	spin_lock(list_lock); /* list_lock is nn->local_clients_lock */
	list_move(&nfs_uuid->list, list);
	spin_unlock(list_lock);

	spin_unlock(&nfs_uuids_lock);
	/* Once nfs_uuid is parented to @list, avoid global nfs_uuids_lock */
	spin_lock(&nfs_uuid->lock);

	__module_get(mod);
	nfsd_mod = mod;

	nfs_uuid->list_lock = list_lock;
	kref_get(&dom->ref);
	nfs_uuid->dom = dom;
	rcu_assign_pointer(nfs_uuid->net, net);
	spin_unlock(&nfs_uuid->lock);
}
EXPORT_SYMBOL_GPL(nfs_uuid_is_local);

void nfs_localio_enable_client(struct nfs_client *clp)
{
	/* nfs_uuid_is_local() does the actual enablement */
	trace_nfs_localio_enable_client(clp);
}
EXPORT_SYMBOL_GPL(nfs_localio_enable_client);

/*
 * Cleanup the nfs_uuid_t embedded in an nfs_client.
 * This is the long-form of nfs_uuid_init().
 */
static bool nfs_uuid_put(nfs_uuid_t *nfs_uuid)
{
	struct nfs_file_localio *nfl;

	spin_lock(&nfs_uuid->lock);
	if (unlikely(!rcu_access_pointer(nfs_uuid->net))) {
		spin_unlock(&nfs_uuid->lock);
		return false;
	}
	RCU_INIT_POINTER(nfs_uuid->net, NULL);

	if (nfs_uuid->dom) {
		auth_domain_put(nfs_uuid->dom);
		nfs_uuid->dom = NULL;
	}

	/* Walk list of files and ensure their last references dropped */

	while ((nfl = list_first_entry_or_null(&nfs_uuid->files,
					       struct nfs_file_localio,
					       list)) != NULL) {
		/* If nfl->nfs_uuid is already NULL, nfs_close_local_fh() is
		 * closing and we must wait, else we unlink and close.
		 */
		if (rcu_access_pointer(nfl->nfs_uuid) == NULL) {
			/* nfs_close_local_fh() is doing the
			 * close and we must wait until it unlinks.
			 */
			wait_var_event_spinlock(nfs_uuid,
						list_first_entry_or_null(
							&nfs_uuid->files,
							struct nfs_file_localio,
							list) != nfl,
						&nfs_uuid->lock);
			continue;
		}

		/* Remove nfl from nfs_uuid->files list */
		list_del_init(&nfl->list);
		spin_unlock(&nfs_uuid->lock);

		nfs_to_nfsd_file_put_local(&nfl->ro_file);
		nfs_to_nfsd_file_put_local(&nfl->rw_file);
		cond_resched();

		spin_lock(&nfs_uuid->lock);
		/* Now we can allow racing nfs_close_local_fh() to
		 * skip the locking.
		 */
		store_release_wake_up(&nfl->nfs_uuid, RCU_INITIALIZER(NULL));
	}

	/* Remove client from nn->local_clients */
	if (nfs_uuid->list_lock) {
		spin_lock(nfs_uuid->list_lock);
		BUG_ON(list_empty(&nfs_uuid->list));
		list_del_init(&nfs_uuid->list);
		spin_unlock(nfs_uuid->list_lock);
		nfs_uuid->list_lock = NULL;
	}

	module_put(nfsd_mod);
	spin_unlock(&nfs_uuid->lock);

	return true;
}

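/*
 * Disable LOCALIO for @clp: drop its binding to the server net and release
 * all cached nfsd_file references.  Safe to call when already disabled;
 * nfs_uuid_put() returns false in that case and no event is traced.
 */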
void nfs_localio_disable_client(struct nfs_client *clp)
{
	if (nfs_uuid_put(&clp->cl_uuid))
		trace_nfs_localio_disable_client(clp);
}
EXPORT_SYMBOL_GPL(nfs_localio_disable_client);

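/*
 * Called from the server side when a per-net namespace is shutting down:
 * detach every nfs_client on @nn_local_clients and disable LOCALIO for
 * each of them.
 */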
void nfs_localio_invalidate_clients(struct list_head *nn_local_clients,
				    spinlock_t *nn_local_clients_lock)
{
	LIST_HEAD(local_clients);
	nfs_uuid_t *nfs_uuid, *tmp;
	struct nfs_client *clp;

	spin_lock(nn_local_clients_lock);
	list_splice_init(nn_local_clients, &local_clients);
	spin_unlock(nn_local_clients_lock);
	list_for_each_entry_safe(nfs_uuid, tmp, &local_clients, list) {
		if (WARN_ON(nfs_uuid->list_lock != nn_local_clients_lock))
			break;
		clp = container_of(nfs_uuid, struct nfs_client, cl_uuid);
		nfs_localio_disable_client(clp);
	}
}
EXPORT_SYMBOL_GPL(nfs_localio_invalidate_clients);

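/*
 * Link @nfl to @nfs_uuid so a later nfs_uuid_put() can find and close it.
 * Returns -ENXIO if the nfs_uuid has already been disabled (net cleared),
 * in which case the caller must drop the nfsd_file it just opened.
 */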
static int nfs_uuid_add_file(nfs_uuid_t *nfs_uuid, struct nfs_file_localio *nfl)
{
	int ret = 0;

	/* Add nfl to nfs_uuid->files if it isn't already */
	spin_lock(&nfs_uuid->lock);
	if (rcu_access_pointer(nfs_uuid->net) == NULL) {
		ret = -ENXIO;
	} else if (list_empty(&nfl->list)) {
		rcu_assign_pointer(nfl->nfs_uuid, nfs_uuid);
		list_add_tail(&nfl->list, &nfs_uuid->files);
	}
	spin_unlock(&nfs_uuid->lock);
	return ret;
}

/*
 * Caller is responsible for calling nfsd_net_put and
 * nfsd_file_put (via nfs_to_nfsd_file_put_local).
 */
struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
		   struct rpc_clnt *rpc_clnt, const struct cred *cred,
		   const struct nfs_fh *nfs_fh, struct nfs_file_localio *nfl,
		   struct nfsd_file __rcu **pnf,
		   const fmode_t fmode)
{
	struct net *net;
	struct nfsd_file *localio;

	/*
	 * Not running in nfsd context, so must safely get reference on nfsd_serv.
	 * But the server may already be shutting down; if so, disallow new localio.
	 * uuid->net is NOT a counted reference, but rcu_read_lock() ensures that
	 * if uuid->net is not NULL, then calling nfsd_net_try_get() is safe
	 * and if it succeeds we will have an implied reference to the net.
	 *
	 * Otherwise NFS may not have a reference on NFSD and therefore cannot
	 * safely make 'nfs_to' calls.
	 */
	rcu_read_lock();
	net = rcu_dereference(uuid->net);
	if (!net || !nfs_to->nfsd_net_try_get(net)) {
		rcu_read_unlock();
		return ERR_PTR(-ENXIO);
	}
	rcu_read_unlock();
	/* We have an implied reference to net thanks to nfsd_net_try_get */
	localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, cred,
					     nfs_fh, pnf, fmode);
	if (!IS_ERR(localio) && nfs_uuid_add_file(uuid, nfl) < 0) {
		/* Delete the cached file when racing with nfs_uuid_put() */
		nfs_to_nfsd_file_put_local(pnf);
	}
	nfs_to_nfsd_net_put(net);

	return localio;
}
EXPORT_SYMBOL_GPL(nfs_open_local_fh);
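/*
 * Sketch of a hypothetical caller (for illustration only; the real callers
 * live in the NFS client's localio code).  It opens a read-only local file
 * and drops it when done; 'clp', 'clnt', 'cred', 'fh' and 'nfl' are assumed
 * to come from the caller's context:
 *
 *	struct nfsd_file *nf;
 *
 *	nf = nfs_open_local_fh(&clp->cl_uuid, clnt, cred, fh, nfl,
 *			       &nfl->ro_file, FMODE_READ);
 *	if (!IS_ERR(nf)) {
 *		... issue I/O directly against nf, then ...
 *		nfs_to_nfsd_file_put_local(&nfl->ro_file);
 *	}
 */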

void nfs_close_local_fh(struct nfs_file_localio *nfl)
{
	nfs_uuid_t *nfs_uuid;

	rcu_read_lock();
	nfs_uuid = rcu_dereference(nfl->nfs_uuid);
	if (!nfs_uuid) {
		/* regular (non-LOCALIO) NFS will hammer this */
		rcu_read_unlock();
		return;
	}

	spin_lock(&nfs_uuid->lock);
	if (!rcu_access_pointer(nfl->nfs_uuid)) {
		/* nfs_uuid_put() has finished here */
		spin_unlock(&nfs_uuid->lock);
		rcu_read_unlock();
		return;
	}
	if (list_empty(&nfl->list)) {
		/* nfs_uuid_put() has started closing files, wait for it
		 * to finish
		 */
		spin_unlock(&nfs_uuid->lock);
		rcu_read_unlock();
		wait_var_event(&nfl->nfs_uuid,
			       rcu_access_pointer(nfl->nfs_uuid) == NULL);
		return;
	}
	/* tell nfs_uuid_put() to wait for us */
	RCU_INIT_POINTER(nfl->nfs_uuid, NULL);
	spin_unlock(&nfs_uuid->lock);
	rcu_read_unlock();

	nfs_to_nfsd_file_put_local(&nfl->ro_file);
	nfs_to_nfsd_file_put_local(&nfl->rw_file);

	/* Remove nfl from nfs_uuid->files list and signal nfs_uuid_put()
	 * that we are done. The moment we drop the spinlock the
	 * nfs_uuid could be freed.
	 */
	spin_lock(&nfs_uuid->lock);
	list_del_init(&nfl->list);
	wake_up_var_locked(nfs_uuid, &nfs_uuid->lock);
	spin_unlock(&nfs_uuid->lock);
}
EXPORT_SYMBOL_GPL(nfs_close_local_fh);

/*
 * The NFS LOCALIO code needs to call into NFSD using various symbols,
 * but cannot be statically linked, because that will make the NFS
 * module always depend on the NFSD module.
 *
 * 'nfs_to' provides NFS access to NFSD functions needed for LOCALIO;
 * its lifetime is tightly coupled to the NFSD module and it will always
 * be available to NFS LOCALIO because any successful client<->server
 * LOCALIO handshake results in a reference on the NFSD module (above),
 * so NFS implicitly holds a reference to the NFSD module and its
 * functions in the 'nfs_to' nfsd_localio_operations cannot disappear.
 *
 * If the last NFS client using LOCALIO disconnects (and its reference
 * on NFSD is dropped) then NFSD could be unloaded, resulting in 'nfs_to'
 * functions being invalid pointers. But if NFSD isn't loaded then NFS
 * will not be able to handshake with NFSD and will have no cause to
 * try to call 'nfs_to' function pointers. If/when NFSD is reloaded it
 * will reinitialize the 'nfs_to' function pointers and make LOCALIO
 * possible.
 */
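/*
 * Illustrative sketch (assumed, not part of this file) of how the NFSD
 * side is expected to populate 'nfs_to' when it loads; the ops structure
 * shown here is a hypothetical name, though the two members are the ones
 * used above:
 *
 *	static const struct nfsd_localio_operations ops = {
 *		.nfsd_net_try_get   = ...,
 *		.nfsd_open_local_fh = ...,
 *		...
 *	};
 *
 *	nfs_to = &ops;	(done once at NFSD module init)
 */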
const struct nfsd_localio_operations *nfs_to;
EXPORT_SYMBOL_GPL(nfs_to);