/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2019 Nexenta by DDN, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/unistd.h>
#include <sys/queue.h>
#include <sys/sdt.h>
#include <netinet/in.h>

#include <rpc/rpc.h>
#include <rpc/xdr.h>
#include <rpc/pmap_prot.h>
#include <rpc/pmap_clnt.h>
#include <rpc/rpcb_prot.h>

#include <rpcsvc/nlm_prot.h>
#include <rpcsvc/sm_inter.h>

#include "nlm_impl.h"

/*
 * The following error codes from nlm_null_rpc indicate that the port we have
 * cached for the client's NLM service is stale and that we need to establish
 * a new RPC client.
 */
#define NLM_STALE_CLNT(_status) \
        ((_status) == RPC_PROGUNAVAIL || \
        (_status) == RPC_PROGVERSMISMATCH || \
        (_status) == RPC_PROCUNAVAIL || \
        (_status) == RPC_CANTCONNECT || \
        (_status) == RPC_XPRTFAILED)

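/*
 * kmem cache from which nlm_rpc_t handles are allocated.
 */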
static struct kmem_cache *nlm_rpch_cache = NULL;

static int nlm_rpch_ctor(void *, void *, int);
static void nlm_rpch_dtor(void *, void *);
static void destroy_rpch(nlm_rpc_t *);
static nlm_rpc_t *get_nlm_rpc_fromcache(struct nlm_host *, int);
static void update_host_rpcbinding(struct nlm_host *, int);
static int refresh_nlm_rpc(struct nlm_host *, nlm_rpc_t *);
static void nlm_host_rele_rpc_locked(struct nlm_host *, nlm_rpc_t *);

static nlm_rpc_t *
get_nlm_rpc_fromcache(struct nlm_host *hostp, int vers)
{
        nlm_rpc_t *rpcp;
        bool_t found = FALSE;

        ASSERT(MUTEX_HELD(&hostp->nh_lock));
        if (TAILQ_EMPTY(&hostp->nh_rpchc))
                return (NULL);

        TAILQ_FOREACH(rpcp, &hostp->nh_rpchc, nr_link) {
                if (rpcp->nr_vers == vers) {
                        found = TRUE;
                        break;
                }
        }

        if (!found)
                return (NULL);

        TAILQ_REMOVE(&hostp->nh_rpchc, rpcp, nr_link);
        return (rpcp);
}

/*
 * Update the host's RPC binding (host->nh_addr).
 * This function is executed by only one thread at a time.
 */
static void
update_host_rpcbinding(struct nlm_host *hostp, int vers)
{
        enum clnt_stat stat;

        ASSERT(MUTEX_HELD(&hostp->nh_lock));

        /*
         * Mark the RPC binding state as "update in progress" to tell
         * other threads that they need to wait until the binding is
         * fully updated.
         */
        hostp->nh_rpcb_state = NRPCB_UPDATE_INPROGRESS;
        hostp->nh_rpcb_ustat = RPC_SUCCESS;
        mutex_exit(&hostp->nh_lock);

        stat = rpcbind_getaddr(&hostp->nh_knc, NLM_PROG, vers, &hostp->nh_addr);
        mutex_enter(&hostp->nh_lock);

        hostp->nh_rpcb_state = ((stat == RPC_SUCCESS) ?
            NRPCB_UPDATED : NRPCB_NEED_UPDATE);

        hostp->nh_rpcb_ustat = stat;
        cv_broadcast(&hostp->nh_rpcb_cv);
}

/*
 * Refresh an RPC handle taken from the host's handle cache.
 * This function is called when an RPC handle is either
 * uninitialized or was initialized using a binding that's
 * no longer current.
 */
static int
refresh_nlm_rpc(struct nlm_host *hostp, nlm_rpc_t *rpcp)
{
        uint32_t zero = 0;
        int ret;

        if (rpcp->nr_handle == NULL) {
                bool_t clset = TRUE;

                ret = clnt_tli_kcreate(&hostp->nh_knc, &hostp->nh_addr,
                    NLM_PROG, rpcp->nr_vers, 0, NLM_RPC_RETRIES,
                    CRED(), &rpcp->nr_handle);

                /*
                 * Set the client's CLSET_NODELAYONERR option to true. The
                 * RPC clnt_call interface creates an artificial delay for
                 * certain call errors in order to prevent RPC consumers
                 * from getting into tight retry loops. Since this function is
                 * called by the NLM service routines we would like to avoid
                 * this artificial delay when possible. We do not retry if the
                 * NULL request fails so it is safe for us to turn this option
                 * on.
                 */
                if (clnt_control(rpcp->nr_handle, CLSET_NODELAYONERR,
                    (char *)&clset) == FALSE) {
                        NLM_ERR("Unable to set CLSET_NODELAYONERR\n");
                }
        } else {
                ret = clnt_tli_kinit(rpcp->nr_handle, &hostp->nh_knc,
                    &hostp->nh_addr, 0, NLM_RPC_RETRIES, CRED());
                if (ret == 0) {
                        enum clnt_stat stat;

                        /*
                         * Check whether the host's RPC binding is still
                         * fresh, i.e. whether the remote program still
                         * listens on the port we have cached. Call the
                         * NULL procedure to find out.
                         *
                         * Note: Even though we set no delay on error on the
                         * client handle the call to nlm_null_rpc can still
                         * delay for 10 seconds before returning an error. For
                         * example the no delay on error option is not honored
                         * for RPC_XPRTFAILED errors (see clnt_cots_kcallit).
                         */
                        stat = nlm_null_rpc(rpcp->nr_handle, rpcp->nr_vers);
                        if (NLM_STALE_CLNT(stat)) {
                                ret = ESTALE;
                        }
                        /*
                         * Need to reset the XID after the null call above,
                         * otherwise we'll reuse the XID from that call.
                         */
                        (void) CLNT_CONTROL(rpcp->nr_handle, CLSET_XID,
                            (char *)&zero);
                }
        }

        return (ret);
}

/*
 * Get an RPC handle that can be used to talk to the NLM service
 * of the given version running on the given host.
 * Saves the obtained RPC handle to the rpcpp argument.
 *
 * If an error occurs, returns a nonzero error code.
 */
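/*
 * A minimal sketch of the expected calling pattern (illustrative only,
 * not copied from the actual NLM service code; NLM4_VERS stands for
 * whichever NLM protocol version the caller needs):
 *
 *	nlm_rpc_t *rpcp;
 *	enum clnt_stat stat;
 *
 *	if (nlm_host_get_rpc(hostp, NLM4_VERS, &rpcp) == 0) {
 *		stat = nlm_null_rpc(rpcp->nr_handle, rpcp->nr_vers);
 *		nlm_host_rele_rpc(hostp, rpcp);
 *	}
 */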
int
nlm_host_get_rpc(struct nlm_host *hostp, int vers, nlm_rpc_t **rpcpp)
{
        nlm_rpc_t *rpcp = NULL;
        int rc;

        mutex_enter(&hostp->nh_lock);

        /*
         * If this handle is either uninitialized, or was
         * initialized using a binding that's now stale,
         * do the init or re-init.
         * See the comments for enum nlm_rpcb_state for more
         * details.
         */
again:
        while (hostp->nh_rpcb_state != NRPCB_UPDATED) {
                if (hostp->nh_rpcb_state == NRPCB_UPDATE_INPROGRESS) {
                        rc = cv_wait_sig(&hostp->nh_rpcb_cv, &hostp->nh_lock);
                        if (rc == 0) {
                                mutex_exit(&hostp->nh_lock);
                                rc = EINTR;
                                goto errout;
                        }
                }

                /*
                 * Check if the RPC binding was marked for update.
                 * If so, start the RPC binding update operation.
                 * NOTE: the operation can be executed by only
                 * one thread at a time.
                 */
                if (hostp->nh_rpcb_state == NRPCB_NEED_UPDATE)
                        update_host_rpcbinding(hostp, vers);

                /*
                 * Check if an RPC error occurred during the RPC binding
                 * update operation. If so, report a corresponding
                 * error.
                 */
                if (hostp->nh_rpcb_ustat != RPC_SUCCESS) {
                        mutex_exit(&hostp->nh_lock);
                        rc = ENOENT;
                        goto errout;
                }
        }

        rpcp = get_nlm_rpc_fromcache(hostp, vers);
        mutex_exit(&hostp->nh_lock);
        if (rpcp == NULL) {
                /*
                 * There weren't any RPC handles in the host's
                 * cache. No luck, just create a new one.
                 */
                rpcp = kmem_cache_alloc(nlm_rpch_cache, KM_SLEEP);
                rpcp->nr_vers = vers;
        }

        /*
         * Refresh RPC binding
         */
        rc = refresh_nlm_rpc(hostp, rpcp);
        if (rc != 0) {
                if (rc == ESTALE) {
                        /*
                         * Host's RPC binding is stale, we have
                         * to update it. Put the RPC handle back
                         * to the cache and mark the host as
                         * "need update".
                         */
                        mutex_enter(&hostp->nh_lock);
                        hostp->nh_rpcb_state = NRPCB_NEED_UPDATE;
                        nlm_host_rele_rpc_locked(hostp, rpcp);
                        goto again;
                }

                destroy_rpch(rpcp);
                goto errout;
        }

        DTRACE_PROBE2(end, struct nlm_host *, hostp,
            nlm_rpc_t *, rpcp);

        *rpcpp = rpcp;
        return (0);

errout:
        NLM_ERR("Can't get RPC client handle for: %s", hostp->nh_name);
        return (rc);
}

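/*
 * Return an RPC handle to the host's cache of handles so that
 * a later caller can reuse it.
 */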
void
nlm_host_rele_rpc(struct nlm_host *hostp, nlm_rpc_t *rpcp)
{
        mutex_enter(&hostp->nh_lock);
        nlm_host_rele_rpc_locked(hostp, rpcp);
        mutex_exit(&hostp->nh_lock);
}

static void
nlm_host_rele_rpc_locked(struct nlm_host *hostp, nlm_rpc_t *rpcp)
{
        ASSERT(mutex_owned(&hostp->nh_lock));
        TAILQ_INSERT_HEAD(&hostp->nh_rpchc, rpcp, nr_link);
}

/*
 * Invalidate the host's RPC binding by marking it as stale.
 * The next time a thread tries to get an RPC handle from the
 * host's handle cache, the host's RPC binding will be updated.
 *
 * This function should be called when an RPC call made via a
 * handle taken from the RPC cache returns RPC_PROCUNAVAIL.
 */
void
nlm_host_invalidate_binding(struct nlm_host *hostp)
{
        mutex_enter(&hostp->nh_lock);
        hostp->nh_rpcb_state = NRPCB_NEED_UPDATE;
        mutex_exit(&hostp->nh_lock);
}

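/*
 * Create the kmem cache from which NLM RPC handles (nlm_rpc_t)
 * are allocated.
 */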
void
nlm_rpc_init(void)
{
        nlm_rpch_cache = kmem_cache_create("nlm_rpch_cache",
            sizeof (nlm_rpc_t), 0, nlm_rpch_ctor, nlm_rpch_dtor,
            NULL, NULL, NULL, 0);
}

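/*
 * Destroy all RPC handles cached for the given host.
 */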
void
nlm_rpc_cache_destroy(struct nlm_host *hostp)
{
        nlm_rpc_t *rpcp;

        /*
         * There's no need to lock the host's mutex here:
         * nlm_rpc_cache_destroy() should be called from
         * only one place: nlm_host_destroy, when all
         * resources the host owns are already cleaned up.
         * So there shouldn't be any races.
         */
        while ((rpcp = TAILQ_FIRST(&hostp->nh_rpchc)) != NULL) {
                TAILQ_REMOVE(&hostp->nh_rpchc, rpcp, nr_link);
                destroy_rpch(rpcp);
        }
}

/*
 * kmem cache constructor: start each nlm_rpc_t fully zeroed.
 */
/* ARGSUSED */
static int
nlm_rpch_ctor(void *datap, void *cdrarg, int kmflags)
{
        nlm_rpc_t *rpcp = (nlm_rpc_t *)datap;

        bzero(rpcp, sizeof (*rpcp));
        return (0);
}

/*
 * kmem cache destructor: the underlying RPC client handle must
 * already have been torn down (see destroy_rpch()).
 */
/* ARGSUSED */
static void
nlm_rpch_dtor(void *datap, void *cdrarg)
{
        nlm_rpc_t *rpcp = (nlm_rpc_t *)datap;
        ASSERT(rpcp->nr_handle == NULL);
}

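/*
 * Destroy an RPC handle: tear down the underlying RPC client and
 * its auth, then return the nlm_rpc_t to the kmem cache.
 */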
static void
destroy_rpch(nlm_rpc_t *rpcp)
{
        if (rpcp->nr_handle != NULL) {
                AUTH_DESTROY(rpcp->nr_handle->cl_auth);
                CLNT_DESTROY(rpcp->nr_handle);
                rpcp->nr_handle = NULL;
        }

        kmem_cache_free(nlm_rpch_cache, rpcp);
}