/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2023 Toomas Soome <tsoome@me.com>
 */

/*
 * Virtio random data device.
 */

#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/crypto/spi.h>
#include <sys/time.h>
#include <virtio.h>

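/*
 * We negotiate no optional virtio feature bits; the device exposes a single
 * request queue (index 0) from which the guest reads entropy.
 */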
#define	VIORAND_FEATURES	0
#define	VIORAND_RQ		0

typedef struct viorand_state viorand_state_t;

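/*
 * Each in-flight read request is tracked by a viorand_rdbuf_t, which pairs
 * the caller's destination buffer and request handle with the DMA memory and
 * descriptor chain used to receive entropy from the device.
 */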
typedef struct viorand_rdbuf {
	viorand_state_t		*rb_viornd;
	virtio_dma_t		*rb_dma;
	virtio_chain_t		*rb_chain;
	size_t			rb_recv_len;
	uchar_t			*rb_req_buf;
	size_t			rb_req_len;
	crypto_req_handle_t	rb_req_handle;
	list_node_t		rb_link;
} viorand_rdbuf_t;

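/*
 * Per-instance soft state.  vio_mutex protects the read buffer free list and
 * the allocation counters; vio_cv is used to wait for buffers to be returned
 * to the free list.
 */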
struct viorand_state {
	dev_info_t	*vio_dip;
	kmutex_t	vio_mutex;
	kcondvar_t	vio_cv;
	crypto_kcf_provider_handle_t vio_handle;
	taskq_t		*vio_taskq;
	virtio_t	*vio_virtio;
	virtio_queue_t	*vio_rq;
	uint64_t	vio_features;

	uint_t		vio_rdbufs_capacity;
	uint_t		vio_rdbufs_alloc;
	list_t		vio_rdbufs_free;	/* Free list */
	viorand_rdbuf_t	*vio_rdbuf_mem;
};

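/*
 * DMA attributes for the read buffers; the scatter/gather list may contain
 * up to 64 cookies.
 */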
static const ddi_dma_attr_t viorand_dma_attr = {
	.dma_attr_version =	DMA_ATTR_V0,
	.dma_attr_addr_lo =	0x0000000000000000,
	.dma_attr_addr_hi =	0xFFFFFFFFFFFFFFFF,
	.dma_attr_count_max =	0x00000000FFFFFFFF,
	.dma_attr_align =	1,
	.dma_attr_burstsizes =	1,
	.dma_attr_minxfer =	1,
	.dma_attr_maxxfer =	0x00000000FFFFFFFF,
	.dma_attr_seg =		0x00000000FFFFFFFF,
	.dma_attr_sgllen =	64,
	.dma_attr_granular =	1,
	.dma_attr_flags =	0
};

/* If set, we do not allow ourselves to be detached. */
boolean_t virtio_registered = B_TRUE;
static void *viorand_statep;

static int viorand_attach(dev_info_t *, ddi_attach_cmd_t);
static int viorand_detach(dev_info_t *, ddi_detach_cmd_t);
static int viorand_quiesce(dev_info_t *);

static void viorand_provider_status(crypto_provider_handle_t, uint_t *);
static int viorand_generate_random(crypto_provider_handle_t,
    crypto_session_id_t, uchar_t *, size_t, crypto_req_handle_t);

static uint_t viorand_interrupt(caddr_t, caddr_t);

/*
 * Module linkage information for the kernel.
 */

static struct dev_ops devops = {
	.devo_rev = DEVO_REV,
	.devo_refcnt = 0,
	.devo_getinfo = ddi_no_info,
	.devo_identify = nulldev,
	.devo_probe = nulldev,
	.devo_attach = viorand_attach,
	.devo_detach = viorand_detach,
	.devo_reset = nodev,
	.devo_cb_ops = NULL,
	.devo_bus_ops = NULL,
	.devo_power = NULL,
	.devo_quiesce = viorand_quiesce
};

static struct modldrv modldrv = {
	.drv_modops = &mod_driverops,
	.drv_linkinfo = "VirtIO Random Number Driver",
	.drv_dev_ops = &devops
};

static struct modlcrypto modlcrypto = {
	.crypto_modops = &mod_cryptoops,
	.crypto_linkinfo = "VirtIO Random Number Provider"
};

static struct modlinkage modlinkage = {
	.ml_rev = MODREV_1,
	.ml_linkage = { &modldrv, &modlcrypto, NULL }
};

/*
 * CSPI information (entry points, provider info, etc.)
 */
static void viorand_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t viorand_control_ops = {
	.provider_status = viorand_provider_status
};

static int viorand_generate_random(crypto_provider_handle_t,
    crypto_session_id_t, uchar_t *, size_t, crypto_req_handle_t);

static crypto_random_number_ops_t viorand_random_number_ops = {
	.generate_random = viorand_generate_random
};

static crypto_ops_t viorand_crypto_ops = {
	.co_control_ops = &viorand_control_ops,
	.co_random_ops = &viorand_random_number_ops
};

static crypto_provider_info_t viorand_prov_info = {
	.pi_interface_version = CRYPTO_SPI_VERSION_1,
	.pi_provider_description = "VirtIO Random Number Provider",
	.pi_provider_type = CRYPTO_HW_PROVIDER,
	.pi_ops_vector = &viorand_crypto_ops,
};

/*
 * DDI entry points.
 */
int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&viorand_statep,
	    sizeof (viorand_state_t), 0);
	if (error != 0)
		return (error);

	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	int error;

	error = mod_remove(&modlinkage);
	if (error == 0)
		ddi_soft_state_fini(&viorand_statep);

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Get a buffer from the free list, waiting for one to be returned if the
 * list is currently empty.
 */
static viorand_rdbuf_t *
viorand_rbuf_alloc(viorand_state_t *state)
{
	viorand_rdbuf_t *rb;

	VERIFY(MUTEX_HELD(&state->vio_mutex));

	while ((rb = list_remove_head(&state->vio_rdbufs_free)) == NULL)
		cv_wait(&state->vio_cv, &state->vio_mutex);

	state->vio_rdbufs_alloc++;
	return (rb);
}

/*
 * Return a buffer to the free list.
 */
static void
viorand_rbuf_free(viorand_state_t *state, viorand_rdbuf_t *rb)
{
	VERIFY(MUTEX_HELD(&state->vio_mutex));
	VERIFY3U(state->vio_rdbufs_alloc, >, 0);

	state->vio_rdbufs_alloc--;
	virtio_chain_clear(rb->rb_chain);
	if (rb->rb_dma != NULL) {
		virtio_dma_free(rb->rb_dma);
		rb->rb_dma = NULL;
	}
	list_insert_head(&state->vio_rdbufs_free, rb);
}

/*
 * Free all allocated buffers. This is called to clean everything up,
 * so we do not want to leave anything around.
 */
static void
viorand_free_bufs(viorand_state_t *state)
{
	VERIFY(MUTEX_HELD(&state->vio_mutex));

	for (uint_t i = 0; i < state->vio_rdbufs_capacity; i++) {
		viorand_rdbuf_t *rb = &state->vio_rdbuf_mem[i];

		if (rb->rb_dma != NULL) {
			virtio_dma_free(rb->rb_dma);
			rb->rb_dma = NULL;
		}

		if (rb->rb_chain != NULL) {
			virtio_chain_free(rb->rb_chain);
			rb->rb_chain = NULL;
		}
	}

	if (state->vio_rdbuf_mem != NULL) {
		kmem_free(state->vio_rdbuf_mem,
		    sizeof (viorand_rdbuf_t) * state->vio_rdbufs_capacity);
		state->vio_rdbuf_mem = NULL;
		state->vio_rdbufs_capacity = 0;
		state->vio_rdbufs_alloc = 0;
	}
}

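/*
 * Pre-allocate one read buffer and descriptor chain per queue entry and
 * place them all on the free list.
 */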
static int
viorand_alloc_bufs(viorand_state_t *state)
{
	VERIFY(MUTEX_HELD(&state->vio_mutex));

	state->vio_rdbufs_capacity = virtio_queue_size(state->vio_rq);
	state->vio_rdbuf_mem = kmem_zalloc(sizeof (viorand_rdbuf_t) *
	    state->vio_rdbufs_capacity, KM_SLEEP);
	list_create(&state->vio_rdbufs_free, sizeof (viorand_rdbuf_t),
	    offsetof(viorand_rdbuf_t, rb_link));

	/* Put everything in free list. */
	for (uint_t i = 0; i < state->vio_rdbufs_capacity; i++)
		list_insert_tail(&state->vio_rdbufs_free,
		    &state->vio_rdbuf_mem[i]);

	for (viorand_rdbuf_t *rb = list_head(&state->vio_rdbufs_free);
	    rb != NULL; rb = list_next(&state->vio_rdbufs_free, rb)) {
		rb->rb_viornd = state;
		rb->rb_chain = virtio_chain_alloc(state->vio_rq, KM_SLEEP);
		if (rb->rb_chain == NULL)
			goto fail;

		virtio_chain_data_set(rb->rb_chain, rb);
	}
	return (0);

fail:
	viorand_free_bufs(state);
	return (ENOMEM);
}

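/*
 * attach(9E): initialize the virtio device, allocate the receive buffers and
 * register as a hardware random number provider with the kernel crypto
 * framework.
 */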
static int
viorand_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	int rv = 0;
	viorand_state_t *state;
	virtio_t *vio;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	default:
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(viorand_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);

	vio = virtio_init(dip, VIORAND_FEATURES, B_TRUE);
	if (vio == NULL) {
		ddi_soft_state_free(viorand_statep, instance);
		return (DDI_FAILURE);
	}

	state = ddi_get_soft_state(viorand_statep, instance);
	state->vio_dip = dip;
	state->vio_virtio = vio;
	state->vio_rq = virtio_queue_alloc(vio, VIORAND_RQ, "requestq",
	    viorand_interrupt, state, B_FALSE, 1);
	if (state->vio_rq == NULL) {
		virtio_fini(state->vio_virtio, B_TRUE);
		ddi_soft_state_free(viorand_statep, instance);
		return (DDI_FAILURE);
	}

	if (virtio_init_complete(state->vio_virtio, VIRTIO_ANY_INTR_TYPE) !=
	    DDI_SUCCESS) {
		virtio_fini(state->vio_virtio, B_TRUE);
		ddi_soft_state_free(viorand_statep, instance);
		return (DDI_FAILURE);
	}

	cv_init(&state->vio_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&state->vio_mutex, NULL, MUTEX_DRIVER, virtio_intr_pri(vio));
	mutex_enter(&state->vio_mutex);

	if (viorand_alloc_bufs(state) != 0) {
		mutex_exit(&state->vio_mutex);
		dev_err(dip, CE_WARN, "failed to allocate memory");
		goto fail;
	}
	mutex_exit(&state->vio_mutex);

	viorand_prov_info.pi_provider_dev.pd_hw = dip;
	viorand_prov_info.pi_provider_handle = state;

	if (virtio_interrupts_enable(state->vio_virtio) != DDI_SUCCESS)
		goto fail;

	rv = crypto_register_provider(&viorand_prov_info, &state->vio_handle);
	if (rv == CRYPTO_SUCCESS) {
		return (DDI_SUCCESS);
	}

fail:
	virtio_interrupts_disable(state->vio_virtio);
	mutex_enter(&state->vio_mutex);
	viorand_free_bufs(state);
	mutex_exit(&state->vio_mutex);
	cv_destroy(&state->vio_cv);
	mutex_destroy(&state->vio_mutex);
	virtio_fini(state->vio_virtio, B_TRUE);
	ddi_soft_state_free(viorand_statep, instance);
	return (DDI_FAILURE);
}

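/*
 * detach(9E): unregister from the crypto framework, shut down the device and
 * release every buffer and descriptor chain before tearing down the instance.
 */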
static int
viorand_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	viorand_state_t *state = ddi_get_soft_state(viorand_statep, instance);

	switch (cmd) {
	case DDI_DETACH:
		if (!virtio_registered)
			break;

		/* FALLTHROUGH */
	case DDI_SUSPEND:
	default:
		return (DDI_FAILURE);
	}

	if (crypto_unregister_provider(state->vio_handle) != CRYPTO_SUCCESS)
		return (DDI_FAILURE);

	virtio_interrupts_disable(state->vio_virtio);
	virtio_shutdown(state->vio_virtio);

	mutex_enter(&state->vio_mutex);
	for (;;) {
		virtio_chain_t *vic;

		vic = virtio_queue_evacuate(state->vio_rq);
		if (vic == NULL)
			break;

		viorand_rbuf_free(state, virtio_chain_data(vic));
	}

	viorand_free_bufs(state);
	mutex_exit(&state->vio_mutex);
	cv_destroy(&state->vio_cv);
	mutex_destroy(&state->vio_mutex);
	(void) virtio_fini(state->vio_virtio, B_FALSE);
	ddi_soft_state_free(viorand_statep, instance);
	return (DDI_SUCCESS);
}

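/*
 * quiesce(9E): stop the device for fast reboot; this must not block or
 * take locks.
 */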
static int
viorand_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	viorand_state_t *state = ddi_get_soft_state(viorand_statep, instance);

	if (state == NULL)
		return (DDI_FAILURE);

	return (virtio_quiesce(state->vio_virtio));
}

/*
 * Control entry points.
 */
static void
viorand_provider_status(crypto_provider_handle_t provider __unused,
    uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}

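/*
 * Append the read buffer's DMA cookie to its descriptor chain as a
 * device-writable entry and submit the chain to the request queue.
 */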
static boolean_t
viorand_submit_request(viorand_rdbuf_t *rb)
{
	if (virtio_chain_append(rb->rb_chain,
	    virtio_dma_cookie_pa(rb->rb_dma, 0),
	    rb->rb_req_len,
	    VIRTIO_DIR_DEVICE_WRITES) != DDI_SUCCESS) {
		return (B_FALSE);
	}

	virtio_dma_sync(rb->rb_dma, DDI_DMA_SYNC_FORDEV);
	virtio_chain_submit(rb->rb_chain, B_TRUE);
	return (B_TRUE);
}

/*
 * We got a portion of data: copy it to the caller's buffer and, if the
 * request is not yet satisfied, resubmit the chain for the remainder.
 */
static void
viorand_process_data(viorand_rdbuf_t *rb)
{
	size_t len;
	int error = CRYPTO_SUCCESS;

	len = MIN(rb->rb_req_len, rb->rb_recv_len);
	bcopy(virtio_dma_va(rb->rb_dma, 0), rb->rb_req_buf, len);
	bzero(virtio_dma_va(rb->rb_dma, 0), len);
	if (len < rb->rb_req_len) {
		rb->rb_req_len -= len;
		rb->rb_req_buf += len;
		/* Try to get the remainder */
		if (viorand_submit_request(rb))
			return;

		/* Release our buffer and return error */
		viorand_rbuf_free(rb->rb_viornd, rb);
		error = CRYPTO_HOST_MEMORY;
	} else {
		/* Got all the data, free our buffer */
		viorand_rbuf_free(rb->rb_viornd, rb);
	}
	crypto_op_notification(rb->rb_req_handle, error);
}

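/*
 * Request queue interrupt handler: drain completed chains, hand the received
 * entropy to the waiting requests and wake up threads waiting for buffers.
 */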
static uint_t
viorand_interrupt(caddr_t a, caddr_t b __unused)
{
	viorand_state_t *state = (viorand_state_t *)a;
	virtio_chain_t *vic;
	boolean_t notify = B_FALSE;

	mutex_enter(&state->vio_mutex);
	while ((vic = virtio_queue_poll(state->vio_rq)) != NULL) {
		/* Actual received len and our read buffer */
		size_t len = virtio_chain_received_length(vic);
		viorand_rdbuf_t *rb = virtio_chain_data(vic);

		virtio_dma_sync(rb->rb_dma, DDI_DMA_SYNC_FORCPU);
		rb->rb_recv_len = len;
		viorand_process_data(rb);
		notify = B_TRUE;
	}
	if (notify)
		cv_broadcast(&state->vio_cv);
	mutex_exit(&state->vio_mutex);
	if (notify)
		return (DDI_INTR_CLAIMED);
	return (DDI_INTR_UNCLAIMED);
}

/*
 * Random number entry point.  The request is completed asynchronously: we
 * queue a read to the device and return CRYPTO_QUEUED; the framework is
 * notified from the interrupt path once data arrives.
 */
static int
viorand_generate_random(crypto_provider_handle_t provider,
    crypto_session_id_t sid __unused, uchar_t *buf, size_t len,
    crypto_req_handle_t req)
{
	viorand_state_t *state = provider;
	viorand_rdbuf_t *rb;

	mutex_enter(&state->vio_mutex);
	rb = viorand_rbuf_alloc(state);
	mutex_exit(&state->vio_mutex);

	rb->rb_req_buf = buf;
	rb->rb_req_len = len;
	rb->rb_req_handle = req;

	rb->rb_dma = virtio_dma_alloc(state->vio_virtio, len,
	    &viorand_dma_attr, DDI_DMA_READ | DDI_DMA_STREAMING, KM_SLEEP);
	if (rb->rb_dma == NULL) {
		goto error;
	}

	if (viorand_submit_request(rb))
		return (CRYPTO_QUEUED);

error:
	mutex_enter(&state->vio_mutex);
	viorand_rbuf_free(state, rb);
	mutex_exit(&state->vio_mutex);
	return (CRYPTO_HOST_MEMORY);
}
531