xref: /linux/fs/nfsd/nfs4callback.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
1 /*
2  *  Copyright (c) 2001 The Regents of the University of Michigan.
3  *  All rights reserved.
4  *
5  *  Kendrick Smith <kmsmith@umich.edu>
6  *  Andy Adamson <andros@umich.edu>
7  *
8  *  Redistribution and use in source and binary forms, with or without
9  *  modification, are permitted provided that the following conditions
10  *  are met:
11  *
12  *  1. Redistributions of source code must retain the above copyright
13  *     notice, this list of conditions and the following disclaimer.
14  *  2. Redistributions in binary form must reproduce the above copyright
15  *     notice, this list of conditions and the following disclaimer in the
16  *     documentation and/or other materials provided with the distribution.
17  *  3. Neither the name of the University nor the names of its
18  *     contributors may be used to endorse or promote products derived
19  *     from this software without specific prior written permission.
20  *
21  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <linux/sunrpc/clnt.h>
35 #include <linux/sunrpc/xprt.h>
36 #include <linux/sunrpc/svc_xprt.h>
37 #include <linux/slab.h>
38 #include "nfsd.h"
39 #include "state.h"
40 #include "netns.h"
41 #include "xdr4cb.h"
42 #include "xdr4.h"
43 
44 #define NFSDDBG_FACILITY                NFSDDBG_PROC
45 
46 static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);
47 
48 #define NFSPROC4_CB_NULL 0
49 #define NFSPROC4_CB_COMPOUND 1
50 
51 /* Index of predefined Linux callback client operations */
52 
53 struct nfs4_cb_compound_hdr {
54 	/* args */
55 	u32		ident;	/* minorversion 0 only */
56 	u32		nops;
57 	__be32		*nops_p;
58 	u32		minorversion;
59 	/* res */
60 	int		status;
61 };
62 
63 /*
64  * Handle decode buffer overflows out-of-line.
65  */
66 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
67 {
68 	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
69 		"Remaining buffer length is %tu words.\n",
70 		func, xdr->end - xdr->p);
71 }
72 
73 static __be32 *xdr_encode_empty_array(__be32 *p)
74 {
75 	*p++ = xdr_zero;
76 	return p;
77 }
78 
79 /*
80  * Encode/decode NFSv4 CB basic data types
81  *
82  * Basic NFSv4 callback data types are defined in section 15 of RFC
83  * 3530: "Network File System (NFS) version 4 Protocol" and section
84  * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
85  * 1 Protocol"
86  */
87 
88 /*
89  *	nfs_cb_opnum4
90  *
91  *	enum nfs_cb_opnum4 {
92  *		OP_CB_GETATTR		= 3,
93  *		  ...
94  *	};
95  */
96 enum nfs_cb_opnum4 {
97 	OP_CB_GETATTR			= 3,
98 	OP_CB_RECALL			= 4,
99 	OP_CB_LAYOUTRECALL		= 5,
100 	OP_CB_NOTIFY			= 6,
101 	OP_CB_PUSH_DELEG		= 7,
102 	OP_CB_RECALL_ANY		= 8,
103 	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
104 	OP_CB_RECALL_SLOT		= 10,
105 	OP_CB_SEQUENCE			= 11,
106 	OP_CB_WANTS_CANCELLED		= 12,
107 	OP_CB_NOTIFY_LOCK		= 13,
108 	OP_CB_NOTIFY_DEVICEID		= 14,
109 	OP_CB_OFFLOAD			= 15,
110 	OP_CB_ILLEGAL			= 10044
111 };
112 
113 static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
114 {
115 	__be32 *p;
116 
117 	p = xdr_reserve_space(xdr, 4);
118 	*p = cpu_to_be32(op);
119 }
120 
121 /*
122  * nfs_fh4
123  *
124  *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
125  */
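/*
 * The file handle goes on the wire as a standard XDR variable-length
 * opaque: a 4-byte length word followed by fh_size bytes of handle
 * data, zero-padded to the next 4-byte boundary.  A 22-byte handle,
 * for example, occupies 4 + 22 + 2 = 28 bytes in the encoded stream.
 */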
126 static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
127 {
128 	u32 length = fh->fh_size;
129 	__be32 *p;
130 
131 	BUG_ON(length > NFS4_FHSIZE);
132 	p = xdr_reserve_space(xdr, 4 + length);
133 	xdr_encode_opaque(p, &fh->fh_base, length);
134 }
135 
136 /*
137  * stateid4
138  *
139  *	struct stateid4 {
140  *		uint32_t	seqid;
141  *		opaque		other[12];
142  *	};
143  */
144 static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
145 {
146 	__be32 *p;
147 
148 	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
149 	*p++ = cpu_to_be32(sid->si_generation);
150 	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
151 }
152 
153 /*
154  * sessionid4
155  *
156  *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
157  */
158 static void encode_sessionid4(struct xdr_stream *xdr,
159 			      const struct nfsd4_session *session)
160 {
161 	__be32 *p;
162 
163 	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
164 	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
165 					NFS4_MAX_SESSIONID_LEN);
166 }
167 
168 /*
169  * nfsstat4
170  */
171 static const struct {
172 	int stat;
173 	int errno;
174 } nfs_cb_errtbl[] = {
175 	{ NFS4_OK,		0		},
176 	{ NFS4ERR_PERM,		-EPERM		},
177 	{ NFS4ERR_NOENT,	-ENOENT		},
178 	{ NFS4ERR_IO,		-EIO		},
179 	{ NFS4ERR_NXIO,		-ENXIO		},
180 	{ NFS4ERR_ACCESS,	-EACCES		},
181 	{ NFS4ERR_EXIST,	-EEXIST		},
182 	{ NFS4ERR_XDEV,		-EXDEV		},
183 	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
184 	{ NFS4ERR_ISDIR,	-EISDIR		},
185 	{ NFS4ERR_INVAL,	-EINVAL		},
186 	{ NFS4ERR_FBIG,		-EFBIG		},
187 	{ NFS4ERR_NOSPC,	-ENOSPC		},
188 	{ NFS4ERR_ROFS,		-EROFS		},
189 	{ NFS4ERR_MLINK,	-EMLINK		},
190 	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
191 	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
192 	{ NFS4ERR_DQUOT,	-EDQUOT		},
193 	{ NFS4ERR_STALE,	-ESTALE		},
194 	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
195 	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
196 	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
197 	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
198 	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
199 	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
200 	{ NFS4ERR_LOCKED,	-EAGAIN		},
201 	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
202 	{ NFS4ERR_SYMLINK,	-ELOOP		},
203 	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
204 	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
205 	{ -1,			-EIO		}
206 };
207 
208 /*
209  * If we cannot translate the error, the recovery routines should
210  * handle it.
211  *
212  * Note: remaining NFSv4 error codes have values > 10000, so should
213  * not conflict with native Linux error codes.
214  */
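/*
 * For example, NFS4ERR_BADXDR (10036) has no table entry, so it is
 * returned as -10036, well outside the range of native errnos.
 */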
215 static int nfs_cb_stat_to_errno(int status)
216 {
217 	int i;
218 
219 	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
220 		if (nfs_cb_errtbl[i].stat == status)
221 			return nfs_cb_errtbl[i].errno;
222 	}
223 
224 	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
225 	return -status;
226 }
227 
228 static int decode_cb_op_status(struct xdr_stream *xdr,
229 			       enum nfs_cb_opnum4 expected, int *status)
230 {
231 	__be32 *p;
232 	u32 op;
233 
234 	p = xdr_inline_decode(xdr, 4 + 4);
235 	if (unlikely(p == NULL))
236 		goto out_overflow;
237 	op = be32_to_cpup(p++);
238 	if (unlikely(op != expected))
239 		goto out_unexpected;
240 	*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
241 	return 0;
242 out_overflow:
243 	print_overflow_msg(__func__, xdr);
244 	return -EIO;
245 out_unexpected:
246 	dprintk("NFSD: Callback server returned operation %d but "
247 		"we issued a request for %d\n", op, expected);
248 	return -EIO;
249 }
250 
251 /*
252  * CB_COMPOUND4args
253  *
254  *	struct CB_COMPOUND4args {
255  *		utf8str_cs	tag;
256  *		uint32_t	minorversion;
257  *		uint32_t	callback_ident;
258  *		nfs_cb_argop4	argarray<>;
259  *	};
260  */
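/*
 * The encoded header is four 4-byte words: the tag length (always 0,
 * we send an empty tag), the minorversion, the callback_ident (only
 * meaningful to NFSv4.0 clients), and the operation count.  The count
 * is written as a placeholder here and patched by encode_cb_nops()
 * once every operation has been encoded.
 */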
261 static void encode_cb_compound4args(struct xdr_stream *xdr,
262 				    struct nfs4_cb_compound_hdr *hdr)
263 {
264 	__be32 *p;
265 
266 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
267 	p = xdr_encode_empty_array(p);		/* empty tag */
268 	*p++ = cpu_to_be32(hdr->minorversion);
269 	*p++ = cpu_to_be32(hdr->ident);
270 
271 	hdr->nops_p = p;
272 	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
273 }
274 
275 /*
276  * Update argarray element count
277  */
278 static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
279 {
280 	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
281 	*hdr->nops_p = cpu_to_be32(hdr->nops);
282 }
283 
284 /*
285  * CB_COMPOUND4res
286  *
287  *	struct CB_COMPOUND4res {
288  *		nfsstat4	status;
289  *		utf8str_cs	tag;
290  *		nfs_cb_resop4	resarray<>;
291  *	};
292  */
293 static int decode_cb_compound4res(struct xdr_stream *xdr,
294 				  struct nfs4_cb_compound_hdr *hdr)
295 {
296 	u32 length;
297 	__be32 *p;
298 
299 	p = xdr_inline_decode(xdr, 4 + 4);
300 	if (unlikely(p == NULL))
301 		goto out_overflow;
302 	hdr->status = be32_to_cpup(p++);
303 	/* Ignore the tag */
304 	length = be32_to_cpup(p++);
305 	p = xdr_inline_decode(xdr, length + 4);
306 	if (unlikely(p == NULL))
307 		goto out_overflow;
308 	p += XDR_QUADLEN(length);
309 	hdr->nops = be32_to_cpup(p);
310 	return 0;
311 out_overflow:
312 	print_overflow_msg(__func__, xdr);
313 	return -EIO;
314 }
315 
316 /*
317  * CB_RECALL4args
318  *
319  *	struct CB_RECALL4args {
320  *		stateid4	stateid;
321  *		bool		truncate;
322  *		nfs_fh4		fh;
323  *	};
324  */
325 static void encode_cb_recall4args(struct xdr_stream *xdr,
326 				  const struct nfs4_delegation *dp,
327 				  struct nfs4_cb_compound_hdr *hdr)
328 {
329 	__be32 *p;
330 
331 	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
332 	encode_stateid4(xdr, &dp->dl_stid.sc_stateid);
333 
334 	p = xdr_reserve_space(xdr, 4);
335 	*p++ = xdr_zero;			/* truncate */
336 
337 	encode_nfs_fh4(xdr, &dp->dl_stid.sc_file->fi_fhandle);
338 
339 	hdr->nops++;
340 }
341 
342 /*
343  * CB_SEQUENCE4args
344  *
345  *	struct CB_SEQUENCE4args {
346  *		sessionid4		csa_sessionid;
347  *		sequenceid4		csa_sequenceid;
348  *		slotid4			csa_slotid;
349  *		slotid4			csa_highest_slotid;
350  *		bool			csa_cachethis;
351  *		referring_call_list4	csa_referring_call_lists<>;
352  *	};
353  */
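/*
 * With the single-slot backchannel implemented here, csa_slotid and
 * csa_highest_slotid are always 0, csa_cachethis is always false, and
 * no referring call lists are sent; only csa_sequenceid changes from
 * call to call.  Nothing is encoded for NFSv4.0, which has no
 * backchannel session.
 */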
354 static void encode_cb_sequence4args(struct xdr_stream *xdr,
355 				    const struct nfsd4_callback *cb,
356 				    struct nfs4_cb_compound_hdr *hdr)
357 {
358 	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
359 	__be32 *p;
360 
361 	if (hdr->minorversion == 0)
362 		return;
363 
364 	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
365 	encode_sessionid4(xdr, session);
366 
367 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
368 	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
369 	*p++ = xdr_zero;			/* csa_slotid */
370 	*p++ = xdr_zero;			/* csa_highest_slotid */
371 	*p++ = xdr_zero;			/* csa_cachethis */
372 	xdr_encode_empty_array(p);		/* csa_referring_call_lists */
373 
374 	hdr->nops++;
375 }
376 
377 /*
378  * CB_SEQUENCE4resok
379  *
380  *	struct CB_SEQUENCE4resok {
381  *		sessionid4	csr_sessionid;
382  *		sequenceid4	csr_sequenceid;
383  *		slotid4		csr_slotid;
384  *		slotid4		csr_highest_slotid;
385  *		slotid4		csr_target_highest_slotid;
386  *	};
387  *
388  *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
389  *	case NFS4_OK:
390  *		CB_SEQUENCE4resok	csr_resok4;
391  *	default:
392  *		void;
393  *	};
394  *
395  * Our current back channel implementation supports a single backchannel
396  * with a single slot.
397  */
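/*
 * The decoder below therefore insists that the returned sessionid
 * matches our session, that csr_sequenceid matches se_cb_seq_nr, and
 * that csr_slotid is 0; any mismatch is reported as -ESERVERFAULT.
 */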
398 static int decode_cb_sequence4resok(struct xdr_stream *xdr,
399 				    struct nfsd4_callback *cb)
400 {
401 	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
402 	int status = -ESERVERFAULT;
403 	__be32 *p;
404 	u32 dummy;
405 
406 	/*
407 	 * If the server returns different values for sessionID, slotID or
408 	 * sequence number, the server is looney tunes.
409 	 */
410 	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
411 	if (unlikely(p == NULL))
412 		goto out_overflow;
413 
414 	if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
415 		dprintk("NFS: %s Invalid session id\n", __func__);
416 		goto out;
417 	}
418 	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
419 
420 	dummy = be32_to_cpup(p++);
421 	if (dummy != session->se_cb_seq_nr) {
422 		dprintk("NFS: %s Invalid sequence number\n", __func__);
423 		goto out;
424 	}
425 
426 	dummy = be32_to_cpup(p++);
427 	if (dummy != 0) {
428 		dprintk("NFS: %s Invalid slotid\n", __func__);
429 		goto out;
430 	}
431 
432 	/*
433 	 * FIXME: process highest slotid and target highest slotid
434 	 */
435 	status = 0;
436 out:
437 	cb->cb_seq_status = status;
438 	return status;
439 out_overflow:
440 	print_overflow_msg(__func__, xdr);
441 	status = -EIO;
442 	goto out;
443 }
444 
445 static int decode_cb_sequence4res(struct xdr_stream *xdr,
446 				  struct nfsd4_callback *cb)
447 {
448 	int status;
449 
450 	if (cb->cb_clp->cl_minorversion == 0)
451 		return 0;
452 
453 	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
454 	if (unlikely(status || cb->cb_seq_status))
455 		return status;
456 
457 	return decode_cb_sequence4resok(xdr, cb);
458 }
459 
460 /*
461  * NFSv4.0 and NFSv4.1 XDR encode functions
462  *
463  * NFSv4.0 callback argument types are defined in section 15 of RFC
464  * 3530: "Network File System (NFS) version 4 Protocol"; NFSv4.1
465  * callback argument types are defined in section 20 of RFC 5661:
466  * "Network File System (NFS) Version 4 Minor Version 1 Protocol".
467  */
468 
469 /*
470  * NB: Without this zero space reservation, callbacks over krb5p fail
471  */
472 static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
473 				 const void *__unused)
474 {
475 	xdr_reserve_space(xdr, 0);
476 }
477 
478 /*
479  * 20.2. Operation 4: CB_RECALL - Recall a Delegation
480  */
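/*
 * The resulting compound is CB_SEQUENCE followed by CB_RECALL for
 * NFSv4.1 and later clients, and a bare CB_RECALL for NFSv4.0, so
 * hdr.nops ends up as 2 or 1 respectively.
 */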
481 static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
482 				   const void *data)
483 {
484 	const struct nfsd4_callback *cb = data;
485 	const struct nfs4_delegation *dp = cb_to_delegation(cb);
486 	struct nfs4_cb_compound_hdr hdr = {
487 		.ident = cb->cb_clp->cl_cb_ident,
488 		.minorversion = cb->cb_clp->cl_minorversion,
489 	};
490 
491 	encode_cb_compound4args(xdr, &hdr);
492 	encode_cb_sequence4args(xdr, cb, &hdr);
493 	encode_cb_recall4args(xdr, dp, &hdr);
494 	encode_cb_nops(&hdr);
495 }
496 
497 
498 /*
499  * NFSv4.0 and NFSv4.1 XDR decode functions
500  *
501  * NFSv4.0 callback result types are defined in section 15 of RFC
502  * 3530: "Network File System (NFS) version 4 Protocol"; NFSv4.1
503  * callback result types are defined in section 20 of RFC 5661:
504  * "Network File System (NFS) Version 4 Minor Version 1 Protocol".
505  */
506 
507 static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
508 				void *__unused)
509 {
510 	return 0;
511 }
512 
513 /*
514  * 20.2. Operation 4: CB_RECALL - Recall a Delegation
515  */
516 static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
517 				  struct xdr_stream *xdr,
518 				  void *data)
519 {
520 	struct nfsd4_callback *cb = data;
521 	struct nfs4_cb_compound_hdr hdr;
522 	int status;
523 
524 	status = decode_cb_compound4res(xdr, &hdr);
525 	if (unlikely(status))
526 		return status;
527 
528 	if (cb != NULL) {
529 		status = decode_cb_sequence4res(xdr, cb);
530 		if (unlikely(status || cb->cb_seq_status))
531 			return status;
532 	}
533 
534 	return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
535 }
536 
537 #ifdef CONFIG_NFSD_PNFS
538 /*
539  * CB_LAYOUTRECALL4args
540  *
541  *	struct layoutrecall_file4 {
542  *		nfs_fh4         lor_fh;
543  *		offset4         lor_offset;
544  *		length4         lor_length;
545  *		stateid4        lor_stateid;
546  *	};
547  *
548  *	union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
549  *	case LAYOUTRECALL4_FILE:
550  *		layoutrecall_file4 lor_layout;
551  *	case LAYOUTRECALL4_FSID:
552  *		fsid4              lor_fsid;
553  *	case LAYOUTRECALL4_ALL:
554  *		void;
555  *	};
556  *
557  *	struct CB_LAYOUTRECALL4args {
558  *		layouttype4             clora_type;
559  *		layoutiomode4           clora_iomode;
560  *		bool                    clora_changed;
561  *		layoutrecall4           clora_recall;
562  *	};
563  */
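/*
 * The recall below always covers the whole file: clora_changed is set,
 * the iomode is IOMODE_ANY, and the range is LAYOUTRECALL4_FILE from
 * offset 0 through NFS4_MAX_UINT64, identified by the recall stateid
 * in ls_recall_sid.
 */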
564 static void encode_cb_layout4args(struct xdr_stream *xdr,
565 				  const struct nfs4_layout_stateid *ls,
566 				  struct nfs4_cb_compound_hdr *hdr)
567 {
568 	__be32 *p;
569 
570 	BUG_ON(hdr->minorversion == 0);
571 
572 	p = xdr_reserve_space(xdr, 5 * 4);
573 	*p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
574 	*p++ = cpu_to_be32(ls->ls_layout_type);
575 	*p++ = cpu_to_be32(IOMODE_ANY);
576 	*p++ = cpu_to_be32(1);
577 	*p = cpu_to_be32(RETURN_FILE);
578 
579 	encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);
580 
581 	p = xdr_reserve_space(xdr, 2 * 8);
582 	p = xdr_encode_hyper(p, 0);
583 	xdr_encode_hyper(p, NFS4_MAX_UINT64);
584 
585 	encode_stateid4(xdr, &ls->ls_recall_sid);
586 
587 	hdr->nops++;
588 }
589 
590 static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
591 				   struct xdr_stream *xdr,
592 				   const void *data)
593 {
594 	const struct nfsd4_callback *cb = data;
595 	const struct nfs4_layout_stateid *ls =
596 		container_of(cb, struct nfs4_layout_stateid, ls_recall);
597 	struct nfs4_cb_compound_hdr hdr = {
598 		.ident = 0,
599 		.minorversion = cb->cb_clp->cl_minorversion,
600 	};
601 
602 	encode_cb_compound4args(xdr, &hdr);
603 	encode_cb_sequence4args(xdr, cb, &hdr);
604 	encode_cb_layout4args(xdr, ls, &hdr);
605 	encode_cb_nops(&hdr);
606 }
607 
608 static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
609 				  struct xdr_stream *xdr,
610 				  void *data)
611 {
612 	struct nfsd4_callback *cb = data;
613 	struct nfs4_cb_compound_hdr hdr;
614 	int status;
615 
616 	status = decode_cb_compound4res(xdr, &hdr);
617 	if (unlikely(status))
618 		return status;
619 
620 	if (cb) {
621 		status = decode_cb_sequence4res(xdr, cb);
622 		if (unlikely(status || cb->cb_seq_status))
623 			return status;
624 	}
625 	return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
626 }
627 #endif /* CONFIG_NFSD_PNFS */
628 
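/*
 * Encode a state owner as its 8-byte clientid followed by the owner
 * string as an XDR variable-length opaque (the lock_owner4 form used
 * by CB_NOTIFY_LOCK).
 */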
629 static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
630 {
631 	__be32	*p;
632 
633 	p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
634 	p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
635 	xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
636 }
637 
638 static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
639 					struct xdr_stream *xdr,
640 					const void *data)
641 {
642 	const struct nfsd4_callback *cb = data;
643 	const struct nfsd4_blocked_lock *nbl =
644 		container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
645 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
646 	struct nfs4_cb_compound_hdr hdr = {
647 		.ident = 0,
648 		.minorversion = cb->cb_clp->cl_minorversion,
649 	};
650 
651 	__be32 *p;
652 
653 	BUG_ON(hdr.minorversion == 0);
654 
655 	encode_cb_compound4args(xdr, &hdr);
656 	encode_cb_sequence4args(xdr, cb, &hdr);
657 
658 	p = xdr_reserve_space(xdr, 4);
659 	*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
660 	encode_nfs_fh4(xdr, &nbl->nbl_fh);
661 	encode_stateowner(xdr, &lo->lo_owner);
662 	hdr.nops++;
663 
664 	encode_cb_nops(&hdr);
665 }
666 
667 static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
668 					struct xdr_stream *xdr,
669 					void *data)
670 {
671 	struct nfsd4_callback *cb = data;
672 	struct nfs4_cb_compound_hdr hdr;
673 	int status;
674 
675 	status = decode_cb_compound4res(xdr, &hdr);
676 	if (unlikely(status))
677 		return status;
678 
679 	if (cb) {
680 		status = decode_cb_sequence4res(xdr, cb);
681 		if (unlikely(status || cb->cb_seq_status))
682 			return status;
683 	}
684 	return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
685 }
686 
687 /*
688  * struct write_response4 {
689  *	stateid4	wr_callback_id<1>;
690  *	length4		wr_count;
691  *	stable_how4	wr_committed;
692  *	verifier4	wr_writeverf;
693  * };
694  * union offload_info4 switch (nfsstat4 coa_status) {
695  *	case NFS4_OK:
696  *		write_response4	coa_resok4;
697  *	default:
698  *		length4		coa_bytes_copied;
699  * };
700  * struct CB_OFFLOAD4args {
701  *	nfs_fh4		coa_fh;
702  *	stateid4	coa_stateid;
703  *	offload_info4	coa_offload_info;
704  * };
705  */
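/*
 * On success the callback carries a full write_response4: an empty
 * stateid array, the byte count, the stable_how value and the write
 * verifier.  On failure only coa_bytes_copied is sent, and nfsd
 * currently always reports it as 0.
 */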
706 static void encode_offload_info4(struct xdr_stream *xdr,
707 				 __be32 nfserr,
708 				 const struct nfsd4_copy *cp)
709 {
710 	__be32 *p;
711 
712 	p = xdr_reserve_space(xdr, 4);
713 	*p++ = nfserr;
714 	if (!nfserr) {
715 		p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
716 		p = xdr_encode_empty_array(p);
717 		p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
718 		*p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
719 		p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
720 					    NFS4_VERIFIER_SIZE);
721 	} else {
722 		p = xdr_reserve_space(xdr, 8);
723 		/* We always return success if bytes were written */
724 		p = xdr_encode_hyper(p, 0);
725 	}
726 }
727 
728 static void encode_cb_offload4args(struct xdr_stream *xdr,
729 				   __be32 nfserr,
730 				   const struct knfsd_fh *fh,
731 				   const struct nfsd4_copy *cp,
732 				   struct nfs4_cb_compound_hdr *hdr)
733 {
734 	__be32 *p;
735 
736 	p = xdr_reserve_space(xdr, 4);
737 	*p++ = cpu_to_be32(OP_CB_OFFLOAD);
738 	encode_nfs_fh4(xdr, fh);
739 	encode_stateid4(xdr, &cp->cp_res.cb_stateid);
740 	encode_offload_info4(xdr, nfserr, cp);
741 
742 	hdr->nops++;
743 }
744 
745 static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
746 				    struct xdr_stream *xdr,
747 				    const void *data)
748 {
749 	const struct nfsd4_callback *cb = data;
750 	const struct nfsd4_copy *cp =
751 		container_of(cb, struct nfsd4_copy, cp_cb);
752 	struct nfs4_cb_compound_hdr hdr = {
753 		.ident = 0,
754 		.minorversion = cb->cb_clp->cl_minorversion,
755 	};
756 
757 	encode_cb_compound4args(xdr, &hdr);
758 	encode_cb_sequence4args(xdr, cb, &hdr);
759 	encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
760 	encode_cb_nops(&hdr);
761 }
762 
763 static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
764 				   struct xdr_stream *xdr,
765 				   void *data)
766 {
767 	struct nfsd4_callback *cb = data;
768 	struct nfs4_cb_compound_hdr hdr;
769 	int status;
770 
771 	status = decode_cb_compound4res(xdr, &hdr);
772 	if (unlikely(status))
773 		return status;
774 
775 	if (cb) {
776 		status = decode_cb_sequence4res(xdr, cb);
777 		if (unlikely(status || cb->cb_seq_status))
778 			return status;
779 	}
780 	return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
781 }
782 /*
783  * RPC procedure tables
784  */
785 #define PROC(proc, call, argtype, restype)				\
786 [NFSPROC4_CLNT_##proc] = {						\
787 	.p_proc    = NFSPROC4_CB_##call,				\
788 	.p_encode  = nfs4_xdr_enc_##argtype,				\
789 	.p_decode  = nfs4_xdr_dec_##restype,				\
790 	.p_arglen  = NFS4_enc_##argtype##_sz,				\
791 	.p_replen  = NFS4_dec_##restype##_sz,				\
792 	.p_statidx = NFSPROC4_CB_##call,				\
793 	.p_name    = #proc,						\
794 }
795 
796 static const struct rpc_procinfo nfs4_cb_procedures[] = {
797 	PROC(CB_NULL,	NULL,		cb_null,	cb_null),
798 	PROC(CB_RECALL,	COMPOUND,	cb_recall,	cb_recall),
799 #ifdef CONFIG_NFSD_PNFS
800 	PROC(CB_LAYOUT,	COMPOUND,	cb_layout,	cb_layout),
801 #endif
802 	PROC(CB_NOTIFY_LOCK,	COMPOUND,	cb_notify_lock,	cb_notify_lock),
803 	PROC(CB_OFFLOAD,	COMPOUND,	cb_offload,	cb_offload),
804 };
805 
806 static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
807 static const struct rpc_version nfs_cb_version4 = {
808 /*
809  * Note on the callback rpc program version number: despite language in rfc
810  * 5661 section 18.36.3 requiring servers to use 4 in this field, the
811  * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
812  * in practice that appears to be what implementations use.  The section
813  * 18.36.3 language is expected to be fixed in an erratum.
814  */
815 	.number			= 1,
816 	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
817 	.procs			= nfs4_cb_procedures,
818 	.counts			= nfs4_cb_counts,
819 };
820 
821 static const struct rpc_version *nfs_cb_version[2] = {
822 	[1] = &nfs_cb_version4,
823 };
824 
825 static const struct rpc_program cb_program;
826 
827 static struct rpc_stat cb_stats = {
828 	.program		= &cb_program
829 };
830 
831 #define NFS4_CALLBACK 0x40000000
832 static const struct rpc_program cb_program = {
833 	.name			= "nfs4_cb",
834 	.number			= NFS4_CALLBACK,
835 	.nrvers			= ARRAY_SIZE(nfs_cb_version),
836 	.version		= nfs_cb_version,
837 	.stats			= &cb_stats,
838 	.pipe_dir_name		= "nfsd4_cb",
839 };
840 
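/*
 * The callback RPC timeout is a tenth of the lease time, but at least
 * one second: with the default 90-second lease, for example, callbacks
 * time out after 9 seconds.
 */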
841 static int max_cb_time(struct net *net)
842 {
843 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
844 	return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
845 }
846 
847 static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
848 {
849 	if (clp->cl_minorversion == 0) {
850 		client->cl_principal = clp->cl_cred.cr_targ_princ ?
851 			clp->cl_cred.cr_targ_princ : "nfs";
852 
853 		return get_cred(rpc_machine_cred());
854 	} else {
855 		struct cred *kcred;
856 
857 		kcred = prepare_kernel_cred(NULL);
858 		if (!kcred)
859 			return NULL;
860 
861 		kcred->uid = ses->se_cb_sec.uid;
862 		kcred->gid = ses->se_cb_sec.gid;
863 		return kcred;
864 	}
865 }
866 
867 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
868 {
869 	int maxtime = max_cb_time(clp->net);
870 	struct rpc_timeout	timeparms = {
871 		.to_initval	= maxtime,
872 		.to_retries	= 0,
873 		.to_maxval	= maxtime,
874 	};
875 	struct rpc_create_args args = {
876 		.net		= clp->net,
877 		.address	= (struct sockaddr *) &conn->cb_addr,
878 		.addrsize	= conn->cb_addrlen,
879 		.saddress	= (struct sockaddr *) &conn->cb_saddr,
880 		.timeout	= &timeparms,
881 		.program	= &cb_program,
882 		.version	= 1,
883 		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
884 	};
885 	struct rpc_clnt *client;
886 	const struct cred *cred;
887 
888 	if (clp->cl_minorversion == 0) {
889 		if (!clp->cl_cred.cr_principal &&
890 				(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
891 			return -EINVAL;
892 		args.client_name = clp->cl_cred.cr_principal;
893 		args.prognumber	= conn->cb_prog;
894 		args.protocol = XPRT_TRANSPORT_TCP;
895 		args.authflavor = clp->cl_cred.cr_flavor;
896 		clp->cl_cb_ident = conn->cb_ident;
897 	} else {
898 		if (!conn->cb_xprt)
899 			return -EINVAL;
900 		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
901 		clp->cl_cb_session = ses;
902 		args.bc_xprt = conn->cb_xprt;
903 		args.prognumber = clp->cl_cb_session->se_cb_prog;
904 		args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
905 				XPRT_TRANSPORT_BC;
906 		args.authflavor = ses->se_cb_sec.flavor;
907 	}
908 	/* Create RPC client */
909 	client = rpc_create(&args);
910 	if (IS_ERR(client)) {
911 		dprintk("NFSD: couldn't create callback client: %ld\n",
912 			PTR_ERR(client));
913 		return PTR_ERR(client);
914 	}
915 	cred = get_backchannel_cred(clp, client, ses);
916 	if (IS_ERR(cred)) {
917 		rpc_shutdown_client(client);
918 		return PTR_ERR(cred);
919 	}
920 	clp->cl_cb_client = client;
921 	clp->cl_cb_cred = cred;
922 	return 0;
923 }
924 
925 static void warn_no_callback_path(struct nfs4_client *clp, int reason)
926 {
927 	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
928 		(int)clp->cl_name.len, clp->cl_name.data, reason);
929 }
930 
931 static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
932 {
933 	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
934 		return;
935 	clp->cl_cb_state = NFSD4_CB_DOWN;
936 	warn_no_callback_path(clp, reason);
937 }
938 
939 static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
940 {
941 	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
942 		return;
943 	clp->cl_cb_state = NFSD4_CB_FAULT;
944 	warn_no_callback_path(clp, reason);
945 }
946 
947 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
948 {
949 	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
950 
951 	if (task->tk_status)
952 		nfsd4_mark_cb_down(clp, task->tk_status);
953 	else
954 		clp->cl_cb_state = NFSD4_CB_UP;
955 }
956 
957 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
958 	/* XXX: release method to ensure we set the cb channel down if
959 	 * necessary on early failure? */
960 	.rpc_call_done = nfsd4_cb_probe_done,
961 };
962 
963 static struct workqueue_struct *callback_wq;
964 
965 /*
966  * Poke the callback thread to process any updates to the callback
967  * parameters, and send a null probe.
968  */
969 void nfsd4_probe_callback(struct nfs4_client *clp)
970 {
971 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
972 	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
973 	nfsd4_run_cb(&clp->cl_cb_null);
974 }
975 
976 void nfsd4_probe_callback_sync(struct nfs4_client *clp)
977 {
978 	nfsd4_probe_callback(clp);
979 	flush_workqueue(callback_wq);
980 }
981 
982 void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
983 {
984 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
985 	spin_lock(&clp->cl_lock);
986 	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
987 	spin_unlock(&clp->cl_lock);
988 }
989 
990 /*
991  * There's currently a single callback channel slot.
992  * If the slot is available, mark it busy.  Otherwise, put the task
993  * to sleep on the callback RPC wait queue.
994  */
995 static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
996 {
997 	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
998 		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
999 		/* Race breaker */
1000 		if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
1001 			dprintk("%s slot is busy\n", __func__);
1002 			return false;
1003 		}
1004 		rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
1005 	}
1006 	return true;
1007 }
1008 
1009 /*
1010  * TODO: cb_sequence should support referring call lists, cachethis, multiple
1011  * slots, and mark callback channel down on communication errors.
1012  */
1013 static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
1014 {
1015 	struct nfsd4_callback *cb = calldata;
1016 	struct nfs4_client *clp = cb->cb_clp;
1017 	u32 minorversion = clp->cl_minorversion;
1018 
1019 	/*
1020 	 * cb_seq_status is only set in decode_cb_sequence4res,
1021 	 * and so will remain 1 if an RPC-level failure occurs.
1022 	 */
1023 	cb->cb_seq_status = 1;
1024 	cb->cb_status = 0;
1025 	if (minorversion) {
1026 		if (!nfsd41_cb_get_slot(clp, task))
1027 			return;
1028 	}
1029 	rpc_call_start(task);
1030 }
1031 
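/*
 * Handle the CB_SEQUENCE reply (NFSv4.1 and later) or the bare RPC
 * status (NFSv4.0), bumping the sequence number, releasing the
 * backchannel slot and restarting the call where appropriate.  Returns
 * true if the caller may go on to process the result of the recalled
 * operation.
 */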
1032 static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
1033 {
1034 	struct nfs4_client *clp = cb->cb_clp;
1035 	struct nfsd4_session *session = clp->cl_cb_session;
1036 	bool ret = true;
1037 
1038 	if (!clp->cl_minorversion) {
1039 		/*
1040 		 * If the backchannel connection was shut down while this
1041 		 * task was queued, we need to resubmit it after setting up
1042 		 * a new backchannel connection.
1043 		 *
1044 		 * Note that if we lost our callback connection permanently
1045 		 * the submission code will error out, so we don't need to
1046 		 * handle that case here.
1047 		 */
1048 		if (task->tk_flags & RPC_TASK_KILLED)
1049 			goto need_restart;
1050 
1051 		return true;
1052 	}
1053 
1054 	switch (cb->cb_seq_status) {
1055 	case 0:
1056 		/*
1057 		 * No need for lock, access serialized in nfsd4_cb_prepare
1058 		 *
1059 		 * RFC5661 20.9.3
1060 		 * If CB_SEQUENCE returns an error, then the state of the slot
1061 		 * (sequence ID, cached reply) MUST NOT change.
1062 		 */
1063 		++session->se_cb_seq_nr;
1064 		break;
1065 	case -ESERVERFAULT:
1066 		++session->se_cb_seq_nr;
1067 		/* Fall through */
1068 	case 1:
1069 	case -NFS4ERR_BADSESSION:
1070 		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
1071 		ret = false;
1072 		break;
1073 	case -NFS4ERR_DELAY:
1074 		if (!rpc_restart_call(task))
1075 			goto out;
1076 
1077 		rpc_delay(task, 2 * HZ);
1078 		return false;
1079 	case -NFS4ERR_BADSLOT:
1080 		goto retry_nowait;
1081 	case -NFS4ERR_SEQ_MISORDERED:
1082 		if (session->se_cb_seq_nr != 1) {
1083 			session->se_cb_seq_nr = 1;
1084 			goto retry_nowait;
1085 		}
1086 		break;
1087 	default:
1088 		dprintk("%s: unprocessed error %d\n", __func__,
1089 			cb->cb_seq_status);
1090 	}
1091 
1092 	clear_bit(0, &clp->cl_cb_slot_busy);
1093 	rpc_wake_up_next(&clp->cl_cb_waitq);
1094 	dprintk("%s: freed slot, new seqid=%d\n", __func__,
1095 		clp->cl_cb_session->se_cb_seq_nr);
1096 
1097 	if (task->tk_flags & RPC_TASK_KILLED)
1098 		goto need_restart;
1099 out:
1100 	return ret;
1101 retry_nowait:
1102 	if (rpc_restart_call_prepare(task))
1103 		ret = false;
1104 	goto out;
1105 need_restart:
1106 	task->tk_status = 0;
1107 	cb->cb_need_restart = true;
1108 	return false;
1109 }
1110 
1111 static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
1112 {
1113 	struct nfsd4_callback *cb = calldata;
1114 	struct nfs4_client *clp = cb->cb_clp;
1115 
1116 	dprintk("%s: minorversion=%d\n", __func__,
1117 		clp->cl_minorversion);
1118 
1119 	if (!nfsd4_cb_sequence_done(task, cb))
1120 		return;
1121 
1122 	if (cb->cb_status) {
1123 		WARN_ON_ONCE(task->tk_status);
1124 		task->tk_status = cb->cb_status;
1125 	}
1126 
1127 	switch (cb->cb_ops->done(cb, task)) {
1128 	case 0:
1129 		task->tk_status = 0;
1130 		rpc_restart_call_prepare(task);
1131 		return;
1132 	case 1:
1133 		break;
1134 	case -1:
1135 		/* Network partition? */
1136 		nfsd4_mark_cb_down(clp, task->tk_status);
1137 		break;
1138 	default:
1139 		BUG();
1140 	}
1141 }
1142 
1143 static void nfsd4_cb_release(void *calldata)
1144 {
1145 	struct nfsd4_callback *cb = calldata;
1146 
1147 	if (cb->cb_need_restart)
1148 		nfsd4_run_cb(cb);
1149 	else
1150 		cb->cb_ops->release(cb);
1151 
1152 }
1153 
1154 static const struct rpc_call_ops nfsd4_cb_ops = {
1155 	.rpc_call_prepare = nfsd4_cb_prepare,
1156 	.rpc_call_done = nfsd4_cb_done,
1157 	.rpc_release = nfsd4_cb_release,
1158 };
1159 
1160 int nfsd4_create_callback_queue(void)
1161 {
1162 	callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
1163 	if (!callback_wq)
1164 		return -ENOMEM;
1165 	return 0;
1166 }
1167 
1168 void nfsd4_destroy_callback_queue(void)
1169 {
1170 	destroy_workqueue(callback_wq);
1171 }
1172 
1173 /* must be called under the state lock */
1174 void nfsd4_shutdown_callback(struct nfs4_client *clp)
1175 {
1176 	set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
1177 	/*
1178 	 * Note this won't actually result in a null callback;
1179 	 * instead, nfsd4_run_cb_work() will detect the killed
1180 	 * client, destroy the rpc client, and stop:
1181 	 */
1182 	nfsd4_run_cb(&clp->cl_cb_null);
1183 	flush_workqueue(callback_wq);
1184 }
1185 
1186 /* requires cl_lock: */
1187 static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
1188 {
1189 	struct nfsd4_session *s;
1190 	struct nfsd4_conn *c;
1191 
1192 	list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
1193 		list_for_each_entry(c, &s->se_conns, cn_persession) {
1194 			if (c->cn_flags & NFS4_CDFC4_BACK)
1195 				return c;
1196 		}
1197 	}
1198 	return NULL;
1199 }
1200 
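/*
 * The callback channel parameters changed, or the client is being
 * destroyed: tear down the existing rpc client and, unless the client
 * has been killed, build a new one from the latest connection info
 * (for NFSv4.1 and later, a backchannel connection found on one of the
 * client's sessions).
 */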
1201 static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
1202 {
1203 	struct nfs4_cb_conn conn;
1204 	struct nfs4_client *clp = cb->cb_clp;
1205 	struct nfsd4_session *ses = NULL;
1206 	struct nfsd4_conn *c;
1207 	int err;
1208 
1209 	/*
1210 	 * This is either an update, or the client dying; in either case,
1211 	 * kill the old client:
1212 	 */
1213 	if (clp->cl_cb_client) {
1214 		rpc_shutdown_client(clp->cl_cb_client);
1215 		clp->cl_cb_client = NULL;
1216 		put_cred(clp->cl_cb_cred);
1217 		clp->cl_cb_cred = NULL;
1218 	}
1219 	if (clp->cl_cb_conn.cb_xprt) {
1220 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1221 		clp->cl_cb_conn.cb_xprt = NULL;
1222 	}
1223 	if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
1224 		return;
1225 	spin_lock(&clp->cl_lock);
1226 	/*
1227 	 * Only serialized callback code is allowed to clear these
1228 	 * flags; main nfsd code can only set them:
1229 	 */
1230 	BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
1231 	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
1232 	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
1233 	c = __nfsd4_find_backchannel(clp);
1234 	if (c) {
1235 		svc_xprt_get(c->cn_xprt);
1236 		conn.cb_xprt = c->cn_xprt;
1237 		ses = c->cn_session;
1238 	}
1239 	spin_unlock(&clp->cl_lock);
1240 
1241 	err = setup_callback_client(clp, &conn, ses);
1242 	if (err) {
1243 		nfsd4_mark_cb_down(clp, err);
1244 		return;
1245 	}
1246 }
1247 
1248 static void
1249 nfsd4_run_cb_work(struct work_struct *work)
1250 {
1251 	struct nfsd4_callback *cb =
1252 		container_of(work, struct nfsd4_callback, cb_work);
1253 	struct nfs4_client *clp = cb->cb_clp;
1254 	struct rpc_clnt *clnt;
1255 
1256 	if (cb->cb_need_restart) {
1257 		cb->cb_need_restart = false;
1258 	} else {
1259 		if (cb->cb_ops && cb->cb_ops->prepare)
1260 			cb->cb_ops->prepare(cb);
1261 	}
1262 
1263 	if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
1264 		nfsd4_process_cb_update(cb);
1265 
1266 	clnt = clp->cl_cb_client;
1267 	if (!clnt) {
1268 		/* Callback channel broken, or client killed; give up: */
1269 		if (cb->cb_ops && cb->cb_ops->release)
1270 			cb->cb_ops->release(cb);
1271 		return;
1272 	}
1273 
1274 	/*
1275 	 * Don't send probe messages for 4.1 or later.
1276 	 */
1277 	if (!cb->cb_ops && clp->cl_minorversion) {
1278 		clp->cl_cb_state = NFSD4_CB_UP;
1279 		return;
1280 	}
1281 
1282 	cb->cb_msg.rpc_cred = clp->cl_cb_cred;
1283 	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
1284 			cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
1285 }
1286 
1287 void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1288 		const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op)
1289 {
1290 	cb->cb_clp = clp;
1291 	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
1292 	cb->cb_msg.rpc_argp = cb;
1293 	cb->cb_msg.rpc_resp = cb;
1294 	cb->cb_ops = ops;
1295 	INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
1296 	cb->cb_seq_status = 1;
1297 	cb->cb_status = 0;
1298 	cb->cb_need_restart = false;
1299 }
1300 
1301 void nfsd4_run_cb(struct nfsd4_callback *cb)
1302 {
1303 	queue_work(callback_wq, &cb->cb_work);
1304 }
1305