/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2003 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * crypto_bufcall(9F) group of routines.
 */

#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/callb.h>
#include <sys/ksynch.h>
#include <sys/systm.h>
#include <sys/taskq_impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/sched_impl.h>

/*
 * All pending crypto bufcalls are put on a list. cbuf_list_lock
 * protects changes to this list.
 *
 * The following locking order is maintained in the code: the
 * global cbuf_list_lock is acquired before the per-bufcall
 * lock (kc_lock) in a crypto bufcall structure.
 */
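/*
 * For illustration, the pattern used by the routines below looks
 * like this (a sketch of the ordering only, not an additional API):
 *
 *	mutex_enter(&cbuf_list_lock);
 *	mutex_enter(&cbufp->kc_lock);
 *	...
 *	mutex_exit(&cbufp->kc_lock);
 *	mutex_exit(&cbuf_list_lock);
 */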
kmutex_t	cbuf_list_lock;
kcondvar_t	cbuf_list_cv;	/* cv the service thread waits on */
static kcf_cbuf_elem_t *cbuf_list_head;
static kcf_cbuf_elem_t *cbuf_list_tail;

/*
 * Allocate and return a handle to be used for crypto_bufcall().
 * Can be called from user context only.
 */
crypto_bc_t
crypto_bufcall_alloc(void)
{
	kcf_cbuf_elem_t *cbufp;

	cbufp = kmem_zalloc(sizeof (kcf_cbuf_elem_t), KM_SLEEP);
	mutex_init(&cbufp->kc_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&cbufp->kc_cv, NULL, CV_DEFAULT, NULL);
	cbufp->kc_state = CBUF_FREE;

	return (cbufp);
}

/*
 * Free the handle if possible. Returns CRYPTO_SUCCESS if the handle
 * is freed. Otherwise, it returns CRYPTO_BUSY.
 *
 * The client should call crypto_unbufcall() if it receives
 * CRYPTO_BUSY.
 *
 * Can be called both from user and interrupt context.
 */
int
crypto_bufcall_free(crypto_bc_t bc)
{
	kcf_cbuf_elem_t *cbufp = (kcf_cbuf_elem_t *)bc;

	mutex_enter(&cbufp->kc_lock);
	if (cbufp->kc_state != CBUF_FREE) {
		mutex_exit(&cbufp->kc_lock);
		return (CRYPTO_BUSY);
	}
	mutex_exit(&cbufp->kc_lock);

	mutex_destroy(&cbufp->kc_lock);
	cv_destroy(&cbufp->kc_cv);
	kmem_free(cbufp, sizeof (kcf_cbuf_elem_t));

	return (CRYPTO_SUCCESS);
}
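/*
 * A minimal teardown sketch for a client (illustrative only; "bc" is
 * assumed to be a handle obtained earlier from crypto_bufcall_alloc()).
 * After crypto_unbufcall() returns, the handle is back in the
 * CBUF_FREE state, so the retried free succeeds:
 *
 *	if (crypto_bufcall_free(bc) == CRYPTO_BUSY) {
 *		(void) crypto_unbufcall(bc);
 *		(void) crypto_bufcall_free(bc);
 *	}
 */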

/*
 * Schedule func() to be called when queue space is available to
 * submit a crypto request.
 *
 * Can be called both from user and interrupt context.
 */
int
crypto_bufcall(crypto_bc_t bc, void (*func)(void *arg), void *arg)
{
	kcf_cbuf_elem_t *cbufp;

	cbufp = (kcf_cbuf_elem_t *)bc;
	if (cbufp == NULL || func == NULL) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	mutex_enter(&cbuf_list_lock);
	mutex_enter(&cbufp->kc_lock);
	if (cbufp->kc_state != CBUF_FREE) {
		mutex_exit(&cbufp->kc_lock);
		mutex_exit(&cbuf_list_lock);
		return (CRYPTO_BUSY);
	}

	cbufp->kc_state = CBUF_WAITING;
	cbufp->kc_func = func;
	cbufp->kc_arg = arg;
	cbufp->kc_prev = cbufp->kc_next = NULL;

	if (cbuf_list_head == NULL) {
		cbuf_list_head = cbuf_list_tail = cbufp;
	} else {
		cbuf_list_tail->kc_next = cbufp;
		cbufp->kc_prev = cbuf_list_tail;
		cbuf_list_tail = cbufp;
	}

	/*
	 * Signal the crypto_bufcall_service thread to start
	 * working on this crypto bufcall request.
	 */
	cv_signal(&cbuf_list_cv);
	mutex_exit(&cbufp->kc_lock);
	mutex_exit(&cbuf_list_lock);

	return (CRYPTO_SUCCESS);
}
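/*
 * Typical usage sketch: when a request fails with CRYPTO_BUSY because
 * the scheduler's queue is full, the client registers a bufcall and
 * resubmits the request from the callback. The client structure
 * xx_state_t, the callback xx_retry(), and the crypto_encrypt()
 * arguments below are hypothetical and shown only to illustrate the
 * flow:
 *
 *	static void
 *	xx_retry(void *arg)
 *	{
 *		xx_state_t *sp = arg;
 *
 *		(void) crypto_encrypt(&sp->xx_mech, &sp->xx_plain,
 *		    &sp->xx_key, NULL, &sp->xx_cipher, NULL);
 *	}
 *
 *	...
 *	rv = crypto_encrypt(&sp->xx_mech, &sp->xx_plain, &sp->xx_key,
 *	    NULL, &sp->xx_cipher, NULL);
 *	if (rv == CRYPTO_BUSY)
 *		(void) crypto_bufcall(sp->xx_bc, xx_retry, sp);
 */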

/*
 * Cancel a pending crypto bufcall request. If the bufcall
 * is currently executing, we wait until it completes.
 *
 * Can only be called from user context.
 */
int
crypto_unbufcall(crypto_bc_t bc)
{
	kcf_cbuf_elem_t *cbufp = (kcf_cbuf_elem_t *)bc;

	mutex_enter(&cbuf_list_lock);
	mutex_enter(&cbufp->kc_lock);

	if (cbufp->kc_state == CBUF_WAITING) {
		kcf_cbuf_elem_t *nextp = cbufp->kc_next;
		kcf_cbuf_elem_t *prevp = cbufp->kc_prev;

		if (nextp != NULL)
			nextp->kc_prev = prevp;
		else
			cbuf_list_tail = prevp;

		if (prevp != NULL)
			prevp->kc_next = nextp;
		else
			cbuf_list_head = nextp;
		cbufp->kc_state = CBUF_FREE;
	} else if (cbufp->kc_state == CBUF_RUNNING) {
		mutex_exit(&cbuf_list_lock);
		/*
		 * crypto_bufcall_service thread is working
		 * on this element. We will wait for that
		 * thread to signal us when done.
		 */
		while (cbufp->kc_state == CBUF_RUNNING)
			cv_wait(&cbufp->kc_cv, &cbufp->kc_lock);
		mutex_exit(&cbufp->kc_lock);

		return (CRYPTO_SUCCESS);
	}

	mutex_exit(&cbufp->kc_lock);
	mutex_exit(&cbuf_list_lock);

	return (CRYPTO_SUCCESS);
}

/*
 * We sample the number of jobs without holding the lock, since an
 * exact count is not necessary here.
 */
#define	KCF_GSWQ_AVAIL	(gswq->gs_maxjobs - gswq->gs_njobs)

/*
 * One queue space each for init, update, and final.
 */
#define	GSWQ_MINFREE	3
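/*
 * For example (numbers chosen only for illustration), if gs_maxjobs
 * is 300 and gs_njobs is 270, KCF_GSWQ_AVAIL is 30, and a single pass
 * of kcf_run_cbufcalls() below dispatches at most 30 / GSWQ_MINFREE,
 * i.e. 10, bufcall callbacks before it bails out.
 */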

/*
 * Go through the list of crypto bufcalls and do the necessary
 * callbacks.
 */
static void
kcf_run_cbufcalls(void)
{
	kcf_cbuf_elem_t *cbufp;
	int count;

	mutex_enter(&cbuf_list_lock);

	/*
	 * Get estimate of available queue space from KCF_GSWQ_AVAIL.
	 * We can call 'n' crypto bufcall callback functions where
	 * n * GSWQ_MINFREE <= available queue space.
	 *
	 * TO DO - Extend the check to taskqs of hardware providers.
	 * For now, we handle only the software providers.
	 */
	count = KCF_GSWQ_AVAIL;
	while ((cbufp = cbuf_list_head) != NULL) {
		if (GSWQ_MINFREE <= count) {
			count -= GSWQ_MINFREE;
			mutex_enter(&cbufp->kc_lock);
			cbuf_list_head = cbufp->kc_next;
			if (cbuf_list_head != NULL) {
				/* new head; clear its stale back pointer */
				cbuf_list_head->kc_prev = NULL;
			}
			cbufp->kc_state = CBUF_RUNNING;
			mutex_exit(&cbufp->kc_lock);
			mutex_exit(&cbuf_list_lock);

			(*cbufp->kc_func)(cbufp->kc_arg);

			mutex_enter(&cbufp->kc_lock);
			cbufp->kc_state = CBUF_FREE;
			cv_broadcast(&cbufp->kc_cv);
			mutex_exit(&cbufp->kc_lock);

			mutex_enter(&cbuf_list_lock);
		} else {
			/*
			 * There is not enough queue space in this
			 * round. We bail out and try again
			 * later.
			 */
			break;
		}
	}
	if (cbuf_list_head == NULL)
		cbuf_list_tail = NULL;

	mutex_exit(&cbuf_list_lock);
}

/*
 * Background processing of crypto bufcalls.
 */
void
crypto_bufcall_service(void)
{
	callb_cpr_t	cprinfo;

	CALLB_CPR_INIT(&cprinfo, &cbuf_list_lock, callb_generic_cpr,
	    "crypto_bufcall_service");

	mutex_enter(&cbuf_list_lock);

	for (;;) {
		if (cbuf_list_head != NULL && KCF_GSWQ_AVAIL >= GSWQ_MINFREE) {
			mutex_exit(&cbuf_list_lock);
			kcf_run_cbufcalls();
			mutex_enter(&cbuf_list_lock);
		}

		if (cbuf_list_head != NULL) {
			/*
			 * Wait 30 seconds for queue space to become
			 * available. This is a reasonable interval as
			 * it does not cause much CPU overhead. We
			 * could instead wait on a condition variable
			 * signaled by the global software queue's
			 * dequeue routine, but that adds overhead to
			 * a routine we want to keep lean. Also, the
			 * client is prepared to wait anyway.
			 */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			mutex_exit(&cbuf_list_lock);
			delay(30 * drv_usectohz(1000000));
			mutex_enter(&cbuf_list_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &cbuf_list_lock);
		}

		/* Wait for new work to arrive */
		if (cbuf_list_head == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&cbuf_list_cv, &cbuf_list_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &cbuf_list_lock);
		}
	}
}