/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the Openbsd Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <machine/vmparam.h>
#include <vm/uma.h>

#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid().
 */
static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)
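
/*
 * Example of the registration half of this contract (an illustrative
 * sketch only, not code from this file): a driver normally calls
 * crypto_get_driverid() from its bus attach routine and tears the slot
 * down again with crypto_unregister_all() on detach.  The "foo" driver,
 * its softc layout, and the sc_cid field are hypothetical names.
 *
 *	static int
 *	foo_attach(device_t dev)
 *	{
 *		struct foo_softc *sc = device_get_softc(dev);
 *
 *		sc->sc_cid = crypto_get_driverid(dev,
 *		    sizeof(struct foo_session), CRYPTOCAP_F_HARDWARE);
 *		if (sc->sc_cid < 0)
 *			return (ENXIO);
 *		return (0);
 *	}
 *
 *	static int
 *	foo_detach(device_t dev)
 *	{
 *		struct foo_softc *sc = device_get_softc(dev);
 *
 *		crypto_unregister_all(sc->sc_cid);
 *		return (0);
 *	}
 */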

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Not tagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;
	uint32_t	cc_hid;
	uint32_t	cc_sessions;		/* (d) # of sessions */

	int		cc_flags;		/* (d) flags */
#define	CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	size_t		cc_session_size;
	volatile int	cc_refs;
};

static	struct cryptocap **crypto_drivers = NULL;
static	int crypto_drivers_size = 0;

struct crypto_session {
	struct cryptocap *cap;
	struct crypto_session_params csp;
	uint64_t id;
	/* Driver softc follows. */
};

static	int crp_sleep = 0;
static	TAILQ_HEAD(cryptop_q ,cryptop) crp_q;	/* request queues */
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic
 */
#define	CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)

struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */

	uint32_t reorder_ops;		/* total ordered sym jobs received */
	uint32_t reorder_cur_seq;	/* current sym job dispatched */

	struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define	CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define	CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define	FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define	CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
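
/*
 * Worked example for the CRYPTO_SEQ_GT() macro above (illustrative note,
 * not code): the signed subtraction keeps the comparison correct across
 * 32-bit wraparound.  With a = 0x00000002 and b = 0xfffffffe, (a - b) is
 * 4 as a signed int, so CRYPTO_SEQ_GT(a, b) is true even though a < b
 * when the two values are compared as plain unsigned integers.
 */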

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif

static	uma_zone_t cryptop_zone;

int	crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RWTUN,
    &crypto_devallowsoft, 0,
    "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RWTUN,
    &crypto_devallowsoft, 0,
    "Enable/disable use of software crypto by /dev/crypto");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static	void crypto_proc(void);
static	struct proc *cryptoproc;
static	void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	void crypto_task_invoke(void *ctx, int pending);
static	void crypto_batch_enqueue(struct cryptop *crp);

static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");

#define	CRYPTOSTAT_INC(stat) do {					\
	counter_u64_add(						\
	    cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
	    1);								\
} while (0)

static void
cryptostats_init(void *arg __unused)
{
	COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);

static void
cryptostats_fini(void *arg __unused)
{
	COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
    NULL);

/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
	.kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	caddr_t kmdp;

	kmdp = preload_search_by_type("elf kernel");

	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	keybuf = (struct keybuf *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_KEYBUF);

	if (keybuf == NULL)
		keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{

	return (keybuf);
}

static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

	refcount_acquire(&cap->cc_refs);
	return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

	if (refcount_release(&cap->cc_refs) == 0)
		return;

	KASSERT(cap->cc_sessions == 0,
	    ("freeing crypto driver with active sessions"));

	free(cap, M_CRYPTO_DATA);
}

static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
	    MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop",
	    sizeof(struct cryptop), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_size *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &crypto_tq);

	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
	    "crypto");

	error = kproc_create((void (*)(void *)) crypto_proc, NULL,
	    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
			error);
		goto bad;
	}

	crypto_ret_workers = mallocarray(crypto_workers_num,
	    sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);

		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;

		mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF);

		error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker,
		    &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d",
				error);
			goto bad;
		}
	}

	keybuf_init();

	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: insure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}

static void
hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx, uint8_t padval)
{
	uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
	u_int i;

	KASSERT(axf->blocksize <= sizeof(hmac_key),
	    ("Invalid HMAC block size %d", axf->blocksize));

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
}

void
hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}
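
/*
 * Illustrative sketch (not code from this file): a software
 * implementation typically precomputes the two padded contexts once per
 * key and reuses copies of them for every request.  The use of the
 * union authctx type follows the software transform code and is an
 * assumption here, as is HMAC-SHA2-256 being the negotiated algorithm.
 *
 *	const struct auth_hash *axf = &auth_hash_hmac_sha2_256;
 *	union authctx ictx, octx;
 *
 *	hmac_init_ipad(axf, key, klen, &ictx);
 *	hmac_init_opad(axf, key, klen, &octx);
 *
 * Per request: copy ictx, axf->Update() over the payload and
 * axf->Final() the inner digest; then copy octx, axf->Update() over
 * that digest and axf->Final() to produce the HMAC.
 */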

static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;
	int i;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] != NULL)
			cap_rele(crypto_drivers[i]);
	}
	free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session + 1);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
	return (&crypto_session->csp);
}

const struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		return (&auth_hash_hmac_sha1);
	case CRYPTO_SHA2_224_HMAC:
		return (&auth_hash_hmac_sha2_224);
	case CRYPTO_SHA2_256_HMAC:
		return (&auth_hash_hmac_sha2_256);
	case CRYPTO_SHA2_384_HMAC:
		return (&auth_hash_hmac_sha2_384);
	case CRYPTO_SHA2_512_HMAC:
		return (&auth_hash_hmac_sha2_512);
	case CRYPTO_NULL_HMAC:
		return (&auth_hash_null);
	case CRYPTO_RIPEMD160_HMAC:
		return (&auth_hash_hmac_ripemd_160);
	case CRYPTO_SHA1:
		return (&auth_hash_sha1);
	case CRYPTO_SHA2_224:
		return (&auth_hash_sha2_224);
	case CRYPTO_SHA2_256:
		return (&auth_hash_sha2_256);
	case CRYPTO_SHA2_384:
		return (&auth_hash_sha2_384);
	case CRYPTO_SHA2_512:
		return (&auth_hash_sha2_512);
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_nist_gmac_aes_128);
		case 192 / 8:
			return (&auth_hash_nist_gmac_aes_192);
		case 256 / 8:
			return (&auth_hash_nist_gmac_aes_256);
		default:
			return (NULL);
		}
	case CRYPTO_BLAKE2B:
		return (&auth_hash_blake2b);
	case CRYPTO_BLAKE2S:
		return (&auth_hash_blake2s);
	case CRYPTO_POLY1305:
		return (&auth_hash_poly1305);
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_ccm_cbc_mac_128);
		case 192 / 8:
			return (&auth_hash_ccm_cbc_mac_192);
		case 256 / 8:
			return (&auth_hash_ccm_cbc_mac_256);
		default:
			return (NULL);
		}
	default:
		return (NULL);
	}
}
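
/*
 * Usage note (sketch only, not code from this file): callers typically
 * use the transform returned above to size the MAC for a session,
 * treating a csp_auth_mlen of zero as a request for the full digest:
 *
 *	const struct auth_hash *axf = crypto_auth_hash(csp);
 *	u_int mlen;
 *
 *	mlen = (csp->csp_auth_mlen != 0) ? csp->csp_auth_mlen :
 *	    axf->hashsize;
 */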

const struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_RIJNDAEL128_CBC:
		return (&enc_xform_rijndael128);
	case CRYPTO_AES_XTS:
		return (&enc_xform_aes_xts);
	case CRYPTO_AES_ICM:
		return (&enc_xform_aes_icm);
	case CRYPTO_AES_NIST_GCM_16:
		return (&enc_xform_aes_nist_gcm);
	case CRYPTO_CAMELLIA_CBC:
		return (&enc_xform_camellia);
	case CRYPTO_NULL_CBC:
		return (&enc_xform_null);
	case CRYPTO_CHACHA20:
		return (&enc_xform_chacha20);
	case CRYPTO_AES_CCM_16:
		return (&enc_xform_ccm);
	case CRYPTO_CHACHA20_POLY1305:
		return (&enc_xform_chacha20_poly1305);
	default:
		return (NULL);
	}
}

static struct cryptocap *
crypto_checkdriver(uint32_t hid)
{

	return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
	struct cryptocap *cap, *best;
	int best_match, error, hid;

	CRYPTO_DRIVER_ASSERT();

	best = NULL;
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		/*
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * match), then skip.
		 */
		cap = crypto_drivers[hid];
		if (cap == NULL ||
		    (cap->cc_flags & flags) == 0)
			continue;

		error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
		if (error >= 0)
			continue;

		/*
		 * Use the driver with the highest probe value.
		 * Hardware drivers use a higher probe value than
		 * software.  In case of a tie, prefer the driver with
		 * the fewest active sessions.
		 */
		if (best == NULL || error > best_match ||
		    (error == best_match &&
		    cap->cc_sessions < best->cc_sessions)) {
			best = cap;
			best_match = error;
		}
	}
	return best;
}
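
/*
 * Probe convention used above (illustrative sketch, not code from this
 * file): CRYPTODEV_PROBESESSION returns a negative value when the driver
 * can handle the session and a non-negative value otherwise; "better"
 * drivers return values closer to zero.  Drivers normally return one of
 * the CRYPTODEV_PROBE_* constants from cryptodev.h.  The "foo" names and
 * the capability-check helper below are hypothetical.
 *
 *	static int
 *	foo_probesession(device_t dev,
 *	    const struct crypto_session_params *csp)
 *	{
 *		if (!foo_supports(csp))
 *			return (EINVAL);
 *		return (CRYPTODEV_PROBE_HARDWARE);
 *	}
 */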

static enum alg_type {
	ALG_NONE = 0,
	ALG_CIPHER,
	ALG_DIGEST,
	ALG_KEYED_DIGEST,
	ALG_COMPRESSION,
	ALG_AEAD
} alg_types[] = {
	[CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CBC] = ALG_CIPHER,
	[CRYPTO_SHA1] = ALG_DIGEST,
	[CRYPTO_NULL_HMAC] = ALG_DIGEST,
	[CRYPTO_NULL_CBC] = ALG_CIPHER,
	[CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
	[CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
	[CRYPTO_AES_XTS] = ALG_CIPHER,
	[CRYPTO_AES_ICM] = ALG_CIPHER,
	[CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
	[CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
	[CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
	[CRYPTO_CHACHA20] = ALG_CIPHER,
	[CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160] = ALG_DIGEST,
	[CRYPTO_SHA2_224] = ALG_DIGEST,
	[CRYPTO_SHA2_256] = ALG_DIGEST,
	[CRYPTO_SHA2_384] = ALG_DIGEST,
	[CRYPTO_SHA2_512] = ALG_DIGEST,
	[CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_16] = ALG_AEAD,
	[CRYPTO_CHACHA20_POLY1305] = ALG_AEAD,
};

static enum alg_type
alg_type(int alg)
{

	if (alg < nitems(alg_types))
		return (alg_types[alg]);
	return (ALG_NONE);
}

static bool
alg_is_compression(int alg)
{

	return (alg_type(alg) == ALG_COMPRESSION);
}

static bool
alg_is_cipher(int alg)
{

	return (alg_type(alg) == ALG_CIPHER);
}

static bool
alg_is_digest(int alg)
{

	return (alg_type(alg) == ALG_DIGEST ||
	    alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_keyed_digest(int alg)
{

	return (alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_aead(int alg)
{

	return (alg_type(alg) == ALG_AEAD);
}

#define	SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)
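
/*
 * Example of a parameter block that passes the checks below (sketch
 * only; the key lengths shown are just one valid choice): an
 * encrypt-then-authenticate session using AES-256-CBC with
 * HMAC-SHA2-256.
 *
 *	struct crypto_session_params csp;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_ETA;
 *	csp.csp_cipher_alg = CRYPTO_AES_CBC;
 *	csp.csp_cipher_klen = 32;
 *	csp.csp_ivlen = 16;
 *	csp.csp_auth_alg = CRYPTO_SHA2_256_HMAC;
 *	csp.csp_auth_klen = 32;
 *
 * csp_auth_mlen is left at zero to request the full digest length.
 */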

/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	/* Mode-independent checks. */
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (false);
	if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
	    csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
		return (false);
	if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
		return (false);
	if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
		return (false);

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		if (!alg_is_compression(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
		    csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_CIPHER:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
			return (false);

		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);

		/* IV is optional for digests (e.g. GMAC). */
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	case CSP_MODE_AEAD:
		if (!alg_is_aead(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_klen == 0)
			return (false);
		if (csp->csp_ivlen == 0 ||
		    csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
			return (false);

		/*
		 * XXX: Would be nice to have a better way to get this
		 * value.
		 */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			if (csp->csp_auth_mlen > 16)
				return (false);
			break;
		}
		break;
	case CSP_MODE_ETA:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	default:
		return (false);
	}

	return (true);
}

/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	cap = cses->cap;

	zfree(cses, M_CRYPTO_DATA);

	CRYPTO_DRIVER_LOCK();
	cap->cc_sessions--;
	if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		wakeup(cap);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);
}

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
	static uint64_t sessid = 0;
	crypto_session_t res;
	struct cryptocap *cap;
	int err;

	if (!check_csp(csp))
		return (EINVAL);

	res = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(csp, crid);
	}
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		CRYPTDEB("no driver");
		return (EOPNOTSUPP);
	}
	cap_ref(cap);
	cap->cc_sessions++;
	CRYPTO_DRIVER_UNLOCK();

	/* Allocate a single block for the generic session and driver softc. */
	res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
	    M_WAITOK | M_ZERO);
	res->cap = cap;
	res->csp = *csp;
	res->id = atomic_fetchadd_64(&sessid, 1);

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		crypto_deletesession(res);
		return (err);
	}

	*cses = res;
	return (0);
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	if (cses == NULL)
		return;

	cap = cses->cap;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	crypto_deletesession(cses);
}
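
/*
 * Typical consumer-side lifecycle (illustrative sketch only, not code
 * from this file): the csp is assumed to have been filled in as in the
 * example before check_csp(), and buf, buflen, and my_done are
 * hypothetical names.  Error handling for the request path is elided;
 * see crypto(9) for the authoritative description of the request API.
 *
 *	crypto_session_t cses;
 *	struct cryptop *crp;
 *	int error;
 *
 *	error = crypto_newsession(&cses, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *	if (error != 0)
 *		return (error);
 *
 *	crp = crypto_getreq(cses, M_WAITOK);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
 *	crypto_use_buf(crp, buf, buflen);
 *	crp->crp_payload_start = 0;
 *	crp->crp_payload_length = buflen;
 *	crp->crp_callback = my_done;
 *	error = crypto_dispatch(crp);
 *
 *	...once all requests have completed...
 *	crypto_freesession(cses);
 *
 * Passing both capability flags lets crypto_select_driver() pick any
 * driver that probes successfully for the parameters.
 */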

/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *cap, **newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		device_printf(dev,
		    "no flags specified when registering driver\n");
		return -1;
	}

	cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	cap->cc_dev = dev;
	cap->cc_session_size = sessionsize;
	cap->cc_flags = flags;
	refcount_init(&cap->cc_refs, 1);

	CRYPTO_DRIVER_LOCK();
	for (;;) {
		for (i = 0; i < crypto_drivers_size; i++) {
			if (crypto_drivers[i] == NULL)
				break;
		}

		if (i < crypto_drivers_size)
			break;

		/* Out of entries, allocate some more. */

		if (2 * crypto_drivers_size <= crypto_drivers_size) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			cap_rele(cap);
			return (-1);
		}
		CRYPTO_DRIVER_UNLOCK();

		newdrv = malloc(2 * crypto_drivers_size *
		    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		CRYPTO_DRIVER_LOCK();
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_size * sizeof(*crypto_drivers));

		crypto_drivers_size *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	cap->cc_hid = i;
	crypto_drivers[i] = cap;
	CRYPTO_DRIVER_UNLOCK();

	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	return i;
}

/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	struct cryptocap *cap;
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] == NULL)
			continue;
		cap = crypto_drivers[i];
		if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
		    strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
			CRYPTO_DRIVER_UNLOCK();
			return (i);
		}
	}
	CRYPTO_DRIVER_UNLOCK();
	return (-1);
}

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap;
	device_t dev;

	dev = NULL;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		dev = cap->cc_dev;
	CRYPTO_DRIVER_UNLOCK();
	return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap;
	int flags;

	flags = 0;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		flags = cap->cc_flags;
	CRYPTO_DRIVER_UNLOCK();
	return (flags);
}
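
/*
 * Illustrative sketch of crypto_find_driver() usage (not code from this
 * file); the driver name is only an example:
 *
 *	int cid;
 *
 *	cid = crypto_find_driver("aesni0");	exact device name and unit
 *	if (cid < 0)
 *		cid = crypto_find_driver("aesni");	any unit of the driver
 *	if (cid < 0)
 *		return (ENOENT);
 */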

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(uint32_t driverid)
{
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		return (EINVAL);
	}

	cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
	crypto_drivers[driverid] = NULL;

	/*
	 * XXX: This doesn't do anything to kick sessions that
	 * have no pending operations.
	 */
	while (cap->cc_sessions != 0)
		mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);

	return (0);
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(uint32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}

size_t
crypto_buffer_len(struct crypto_buffer *cb)
{
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		return (cb->cb_buf_len);
	case CRYPTO_BUF_MBUF:
		if (cb->cb_mbuf->m_flags & M_PKTHDR)
			return (cb->cb_mbuf->m_pkthdr.len);
		return (m_length(cb->cb_mbuf, NULL));
	case CRYPTO_BUF_SINGLE_MBUF:
		return (cb->cb_mbuf->m_len);
	case CRYPTO_BUF_VMPAGE:
		return (cb->cb_vm_page_len);
	case CRYPTO_BUF_UIO:
		return (cb->cb_uio->uio_resid);
	default:
		return (0);
	}
}

#ifdef INVARIANTS
/* Various sanity checks on crypto requests. */
static void
cb_sanity(struct crypto_buffer *cb, const char *name)
{
	KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid %s buffer type", name));
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		KASSERT(cb->cb_buf_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		break;
	case CRYPTO_BUF_VMPAGE:
		KASSERT(CRYPTO_HAS_VMPAGE,
		    ("incoming crp uses dmap on supported arch"));
		KASSERT(cb->cb_vm_page_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		KASSERT(cb->cb_vm_page_offset >= 0,
		    ("incoming crp with -ve %s buffer offset", name));
		KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
		    ("incoming crp with %s buffer offset greater than page size"
		    , name));
		break;
	default:
		break;
	}
}

static void
crp_sanity(struct cryptop *crp)
{
	struct crypto_session_params *csp;
	struct crypto_buffer *out;
	size_t ilen, len, olen;

	KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
	KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
	    crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid output buffer type"));
	KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
	KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
	    ("incoming crp already done"));

	csp = &crp->crp_session->csp;
	cb_sanity(&crp->crp_buf, "input");
	ilen = crypto_buffer_len(&crp->crp_buf);
	olen = ilen;
	out = NULL;
	if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
		if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
			cb_sanity(&crp->crp_obuf, "output");
			out = &crp->crp_obuf;
			olen = crypto_buffer_len(out);
		}
	} else
		KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
		    ("incoming crp with separate output buffer "
		    "but no session support"));

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
		    crp->crp_op == CRYPTO_OP_DECOMPRESS,
		    ("invalid compression op %x", crp->crp_op));
		break;
	case CSP_MODE_CIPHER:
KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || 1236c0341432SJohn Baldwin crp->crp_op == CRYPTO_OP_DECRYPT, 1237c0341432SJohn Baldwin ("invalid cipher op %x", crp->crp_op)); 1238c0341432SJohn Baldwin break; 1239c0341432SJohn Baldwin case CSP_MODE_DIGEST: 1240c0341432SJohn Baldwin KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || 1241c0341432SJohn Baldwin crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, 1242c0341432SJohn Baldwin ("invalid digest op %x", crp->crp_op)); 1243c0341432SJohn Baldwin break; 1244c0341432SJohn Baldwin case CSP_MODE_AEAD: 1245c0341432SJohn Baldwin KASSERT(crp->crp_op == 1246c0341432SJohn Baldwin (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1247c0341432SJohn Baldwin crp->crp_op == 1248c0341432SJohn Baldwin (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1249c0341432SJohn Baldwin ("invalid AEAD op %x", crp->crp_op)); 1250c0341432SJohn Baldwin KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, 1251fc8fc743SJohn Baldwin ("AEAD without a separate IV")); 1252c0341432SJohn Baldwin break; 1253c0341432SJohn Baldwin case CSP_MODE_ETA: 1254c0341432SJohn Baldwin KASSERT(crp->crp_op == 1255c0341432SJohn Baldwin (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1256c0341432SJohn Baldwin crp->crp_op == 1257c0341432SJohn Baldwin (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1258c0341432SJohn Baldwin ("invalid ETA op %x", crp->crp_op)); 1259c0341432SJohn Baldwin break; 1260c0341432SJohn Baldwin } 1261c0341432SJohn Baldwin if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 12629b774dc0SJohn Baldwin if (crp->crp_aad == NULL) { 1263c0341432SJohn Baldwin KASSERT(crp->crp_aad_start == 0 || 12649c0e3d3aSJohn Baldwin crp->crp_aad_start < ilen, 1265c0341432SJohn Baldwin ("invalid AAD start")); 12669b774dc0SJohn Baldwin KASSERT(crp->crp_aad_length != 0 || 12679b774dc0SJohn Baldwin crp->crp_aad_start == 0, 1268c0341432SJohn Baldwin ("AAD with zero length and non-zero start")); 1269c0341432SJohn Baldwin KASSERT(crp->crp_aad_length == 0 || 12709c0e3d3aSJohn Baldwin crp->crp_aad_start + crp->crp_aad_length <= ilen, 1271c0341432SJohn Baldwin ("AAD outside input length")); 1272c0341432SJohn Baldwin } else { 12739b774dc0SJohn Baldwin KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, 12749b774dc0SJohn Baldwin ("session doesn't support separate AAD buffer")); 12759b774dc0SJohn Baldwin KASSERT(crp->crp_aad_start == 0, 12769b774dc0SJohn Baldwin ("separate AAD buffer with non-zero AAD start")); 12779b774dc0SJohn Baldwin KASSERT(crp->crp_aad_length != 0, 12789b774dc0SJohn Baldwin ("separate AAD buffer with zero length")); 12799b774dc0SJohn Baldwin } 12809b774dc0SJohn Baldwin } else { 12819b774dc0SJohn Baldwin KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 && 12829b774dc0SJohn Baldwin crp->crp_aad_length == 0, 1283c0341432SJohn Baldwin ("AAD region in request not supporting AAD")); 1284c0341432SJohn Baldwin } 1285c0341432SJohn Baldwin if (csp->csp_ivlen == 0) { 128629fe41ddSJohn Baldwin KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, 128729fe41ddSJohn Baldwin ("IV_SEPARATE set when IV isn't used")); 1288c0341432SJohn Baldwin KASSERT(crp->crp_iv_start == 0, 1289c0341432SJohn Baldwin ("crp_iv_start set when IV isn't used")); 1290c0341432SJohn Baldwin } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { 1291c0341432SJohn Baldwin KASSERT(crp->crp_iv_start == 0, 1292c0341432SJohn Baldwin ("IV_SEPARATE used with non-zero IV start")); 1293c0341432SJohn Baldwin } else { 12949c0e3d3aSJohn Baldwin KASSERT(crp->crp_iv_start < ilen, 1295c0341432SJohn Baldwin ("invalid IV start")); 12969c0e3d3aSJohn 
Baldwin KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, 12979c0e3d3aSJohn Baldwin ("IV outside buffer length")); 1298c0341432SJohn Baldwin } 12999c0e3d3aSJohn Baldwin /* XXX: payload_start of 0 should always be < ilen? */ 1300c0341432SJohn Baldwin KASSERT(crp->crp_payload_start == 0 || 13019c0e3d3aSJohn Baldwin crp->crp_payload_start < ilen, 1302c0341432SJohn Baldwin ("invalid payload start")); 1303c0341432SJohn Baldwin KASSERT(crp->crp_payload_start + crp->crp_payload_length <= 13049c0e3d3aSJohn Baldwin ilen, ("payload outside input buffer")); 13059c0e3d3aSJohn Baldwin if (out == NULL) { 13069c0e3d3aSJohn Baldwin KASSERT(crp->crp_payload_output_start == 0, 13079c0e3d3aSJohn Baldwin ("payload output start non-zero without output buffer")); 13089c0e3d3aSJohn Baldwin } else { 13099c0e3d3aSJohn Baldwin KASSERT(crp->crp_payload_output_start < olen, 13109c0e3d3aSJohn Baldwin ("invalid payload output start")); 13119c0e3d3aSJohn Baldwin KASSERT(crp->crp_payload_output_start + 13129c0e3d3aSJohn Baldwin crp->crp_payload_length <= olen, 13139c0e3d3aSJohn Baldwin ("payload outside output buffer")); 13149c0e3d3aSJohn Baldwin } 1315c0341432SJohn Baldwin if (csp->csp_mode == CSP_MODE_DIGEST || 1316c0341432SJohn Baldwin csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 13179c0e3d3aSJohn Baldwin if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) 13189c0e3d3aSJohn Baldwin len = ilen; 13199c0e3d3aSJohn Baldwin else 13209c0e3d3aSJohn Baldwin len = olen; 1321c0341432SJohn Baldwin KASSERT(crp->crp_digest_start == 0 || 13229c0e3d3aSJohn Baldwin crp->crp_digest_start < len, 1323c0341432SJohn Baldwin ("invalid digest start")); 1324c0341432SJohn Baldwin /* XXX: For the mlen == 0 case this check isn't perfect. */ 13259c0e3d3aSJohn Baldwin KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, 13269c0e3d3aSJohn Baldwin ("digest outside buffer")); 1327c0341432SJohn Baldwin } else { 1328c0341432SJohn Baldwin KASSERT(crp->crp_digest_start == 0, 1329c0341432SJohn Baldwin ("non-zero digest start for request without a digest")); 1330c0341432SJohn Baldwin } 1331c0341432SJohn Baldwin if (csp->csp_cipher_klen != 0) 1332c0341432SJohn Baldwin KASSERT(csp->csp_cipher_key != NULL || 1333c0341432SJohn Baldwin crp->crp_cipher_key != NULL, 1334c0341432SJohn Baldwin ("cipher request without a key")); 1335c0341432SJohn Baldwin if (csp->csp_auth_klen != 0) 1336c0341432SJohn Baldwin KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, 1337c0341432SJohn Baldwin ("auth request without a key")); 1338c0341432SJohn Baldwin KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); 1339c0341432SJohn Baldwin } 1340c0341432SJohn Baldwin #endif 1341c0341432SJohn Baldwin 134268f6800cSMark Johnston static int 134368f6800cSMark Johnston crypto_dispatch_one(struct cryptop *crp, int hint) 1344091d81d1SSam Leffler { 13454acae0acSPawel Jakub Dawidek struct cryptocap *cap; 13464acae0acSPawel Jakub Dawidek int result; 1347091d81d1SSam Leffler 1348c0341432SJohn Baldwin #ifdef INVARIANTS 1349c0341432SJohn Baldwin crp_sanity(crp); 1350c0341432SJohn Baldwin #endif 13517290cb47SMark Johnston CRYPTOSTAT_INC(cs_ops); 13527d1853eeSSam Leffler 135398d788c8SMark Johnston crp->crp_retw_id = crp->crp_session->id % crypto_workers_num; 1354de2b2c90SFabien Thomas 135568f6800cSMark Johnston /* 135668f6800cSMark Johnston * Caller marked the request to be processed immediately; dispatch it 135768f6800cSMark Johnston * directly to the driver unless the driver is currently blocked, in 135868f6800cSMark Johnston * which case it is 
queued for deferred dispatch.
135968f6800cSMark Johnston 	 */
136068f6800cSMark Johnston 	cap = crp->crp_session->cap;
136168f6800cSMark Johnston 	if (!atomic_load_int(&cap->cc_qblocked)) {
136268f6800cSMark Johnston 		result = crypto_invoke(cap, crp, hint);
136368f6800cSMark Johnston 		if (result != ERESTART)
136468f6800cSMark Johnston 			return (result);
136568f6800cSMark Johnston 
136668f6800cSMark Johnston 		/*
136768f6800cSMark Johnston 		 * The driver ran out of resources, put the request on the
136868f6800cSMark Johnston 		 * queue.
136968f6800cSMark Johnston 		 */
137068f6800cSMark Johnston 	}
137168f6800cSMark Johnston 	crypto_batch_enqueue(crp);
137268f6800cSMark Johnston 	return (0);
137368f6800cSMark Johnston }
137468f6800cSMark Johnston 
137568f6800cSMark Johnston int
137668f6800cSMark Johnston crypto_dispatch(struct cryptop *crp)
137768f6800cSMark Johnston {
137868f6800cSMark Johnston 	return (crypto_dispatch_one(crp, 0));
137968f6800cSMark Johnston }
138068f6800cSMark Johnston 
138168f6800cSMark Johnston int
138268f6800cSMark Johnston crypto_dispatch_async(struct cryptop *crp, int flags)
138368f6800cSMark Johnston {
138439bbca6fSFabien Thomas 	struct crypto_ret_worker *ret_worker;
138539bbca6fSFabien Thomas 
138668f6800cSMark Johnston 	if (!CRYPTO_SESS_SYNC(crp->crp_session)) {
138768f6800cSMark Johnston 		/*
138868f6800cSMark Johnston 		 * The driver issues completions asynchronously, don't bother
138968f6800cSMark Johnston 		 * deferring dispatch to a worker thread.
139068f6800cSMark Johnston 		 */
139168f6800cSMark Johnston 		return (crypto_dispatch(crp));
139268f6800cSMark Johnston 	}
139339bbca6fSFabien Thomas 
139468f6800cSMark Johnston #ifdef INVARIANTS
139568f6800cSMark Johnston 	crp_sanity(crp);
139668f6800cSMark Johnston #endif
139768f6800cSMark Johnston 	CRYPTOSTAT_INC(cs_ops);
139868f6800cSMark Johnston 
139968f6800cSMark Johnston 	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
140068f6800cSMark Johnston 	if ((flags & CRYPTO_ASYNC_ORDERED) != 0) {
140168f6800cSMark Johnston 		crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED;
140268f6800cSMark Johnston 		ret_worker = CRYPTO_RETW(crp->crp_retw_id);
140339bbca6fSFabien Thomas 		CRYPTO_RETW_LOCK(ret_worker);
140439bbca6fSFabien Thomas 		crp->crp_seq = ret_worker->reorder_ops++;
140539bbca6fSFabien Thomas 		CRYPTO_RETW_UNLOCK(ret_worker);
140639bbca6fSFabien Thomas 	}
140739bbca6fSFabien Thomas 	TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
140839bbca6fSFabien Thomas 	taskqueue_enqueue(crypto_tq, &crp->crp_task);
140939bbca6fSFabien Thomas 	return (0);
141039bbca6fSFabien Thomas }
14114acae0acSPawel Jakub Dawidek 
141268f6800cSMark Johnston void
141368f6800cSMark Johnston crypto_dispatch_batch(struct cryptopq *crpq, int flags)
141468f6800cSMark Johnston {
141568f6800cSMark Johnston 	struct cryptop *crp;
141668f6800cSMark Johnston 	int hint;
141768f6800cSMark Johnston 
141868f6800cSMark Johnston 	while ((crp = TAILQ_FIRST(crpq)) != NULL) {
141968f6800cSMark Johnston 		hint = TAILQ_NEXT(crp, crp_next) != NULL ?
CRYPTO_HINT_MORE : 0; 142068f6800cSMark Johnston TAILQ_REMOVE(crpq, crp, crp_next); 142168f6800cSMark Johnston if (crypto_dispatch_one(crp, hint) != 0) 142239bbca6fSFabien Thomas crypto_batch_enqueue(crp); 142368f6800cSMark Johnston } 142439bbca6fSFabien Thomas } 142539bbca6fSFabien Thomas 142668f6800cSMark Johnston static void 142739bbca6fSFabien Thomas crypto_batch_enqueue(struct cryptop *crp) 142839bbca6fSFabien Thomas { 142939bbca6fSFabien Thomas 14304acae0acSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 14314acae0acSPawel Jakub Dawidek TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); 14323a865c82SPawel Jakub Dawidek if (crp_sleep) 14333a865c82SPawel Jakub Dawidek wakeup_one(&crp_q); 14343569ae7fSSam Leffler CRYPTO_Q_UNLOCK(); 1435091d81d1SSam Leffler } 1436091d81d1SSam Leffler 143739bbca6fSFabien Thomas static void 143839bbca6fSFabien Thomas crypto_task_invoke(void *ctx, int pending) 143939bbca6fSFabien Thomas { 144039bbca6fSFabien Thomas struct cryptocap *cap; 144139bbca6fSFabien Thomas struct cryptop *crp; 1442c0341432SJohn Baldwin int result; 144339bbca6fSFabien Thomas 144439bbca6fSFabien Thomas crp = (struct cryptop *)ctx; 1445c0341432SJohn Baldwin cap = crp->crp_session->cap; 144639bbca6fSFabien Thomas result = crypto_invoke(cap, crp, 0); 144739bbca6fSFabien Thomas if (result == ERESTART) 144839bbca6fSFabien Thomas crypto_batch_enqueue(crp); 144939bbca6fSFabien Thomas } 145039bbca6fSFabien Thomas 1451091d81d1SSam Leffler /* 1452091d81d1SSam Leffler * Dispatch a crypto request to the appropriate crypto devices. 1453091d81d1SSam Leffler */ 1454091d81d1SSam Leffler static int 14554acae0acSPawel Jakub Dawidek crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) 1456091d81d1SSam Leffler { 14574acae0acSPawel Jakub Dawidek 14584acae0acSPawel Jakub Dawidek KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); 14594acae0acSPawel Jakub Dawidek KASSERT(crp->crp_callback != NULL, 14604acae0acSPawel Jakub Dawidek ("%s: crp->crp_callback == NULL", __func__)); 1461c0341432SJohn Baldwin KASSERT(crp->crp_session != NULL, 1462c0341432SJohn Baldwin ("%s: crp->crp_session == NULL", __func__)); 1463091d81d1SSam Leffler 14644acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1465c0341432SJohn Baldwin struct crypto_session_params csp; 14661b0909d5SConrad Meyer crypto_session_t nses; 1467091d81d1SSam Leffler 1468091d81d1SSam Leffler /* 1469091d81d1SSam Leffler * Driver has unregistered; migrate the session and return 1470091d81d1SSam Leffler * an error to the caller so they'll resubmit the op. 14714acae0acSPawel Jakub Dawidek * 14724acae0acSPawel Jakub Dawidek * XXX: What if there are more already queued requests for this 14734acae0acSPawel Jakub Dawidek * session? 1474c0341432SJohn Baldwin * 1475c0341432SJohn Baldwin * XXX: Real solution is to make sessions refcounted 1476c0341432SJohn Baldwin * and force callers to hold a reference when 1477c0341432SJohn Baldwin * assigning to crp_session. Could maybe change 1478c0341432SJohn Baldwin * crypto_getreq to accept a session pointer to make 1479c0341432SJohn Baldwin * that work. Alternatively, we could abandon the 1480c0341432SJohn Baldwin * notion of rewriting crp_session in requests forcing 1481c0341432SJohn Baldwin * the caller to deal with allocating a new session. 1482c0341432SJohn Baldwin * Perhaps provide a method to allow a crp's session to 1483c0341432SJohn Baldwin * be swapped that callers could use. 
1484091d81d1SSam Leffler */ 1485c0341432SJohn Baldwin csp = crp->crp_session->csp; 14861b0909d5SConrad Meyer crypto_freesession(crp->crp_session); 14874acae0acSPawel Jakub Dawidek 1488c0341432SJohn Baldwin /* 1489c0341432SJohn Baldwin * XXX: Key pointers may no longer be valid. If we 1490c0341432SJohn Baldwin * really want to support this we need to define the 1491c0341432SJohn Baldwin * KPI such that 'csp' is required to be valid for the 1492c0341432SJohn Baldwin * duration of a session by the caller perhaps. 1493c0341432SJohn Baldwin * 1494c0341432SJohn Baldwin * XXX: If the keys have been changed this will reuse 1495c0341432SJohn Baldwin * the old keys. This probably suggests making 1496c0341432SJohn Baldwin * rekeying more explicit and updating the key 1497c0341432SJohn Baldwin * pointers in 'csp' when the keys change. 1498c0341432SJohn Baldwin */ 1499c0341432SJohn Baldwin if (crypto_newsession(&nses, &csp, 15006810ad6fSSam Leffler CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) 15011b0909d5SConrad Meyer crp->crp_session = nses; 1502091d81d1SSam Leffler 1503091d81d1SSam Leffler crp->crp_etype = EAGAIN; 15041a91ccccSSam Leffler crypto_done(crp); 1505091d81d1SSam Leffler return 0; 1506091d81d1SSam Leffler } else { 1507091d81d1SSam Leffler /* 1508091d81d1SSam Leffler * Invoke the driver to process the request. 1509091d81d1SSam Leffler */ 15106810ad6fSSam Leffler return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); 1511091d81d1SSam Leffler } 1512091d81d1SSam Leffler } 1513091d81d1SSam Leffler 1514091d81d1SSam Leffler void 1515946b8f6fSJohn Baldwin crypto_destroyreq(struct cryptop *crp) 1516091d81d1SSam Leffler { 15170d5c337bSPawel Jakub Dawidek #ifdef DIAGNOSTIC 15180d5c337bSPawel Jakub Dawidek { 15190d5c337bSPawel Jakub Dawidek struct cryptop *crp2; 152039bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 15210d5c337bSPawel Jakub Dawidek 15220d5c337bSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 15230d5c337bSPawel Jakub Dawidek TAILQ_FOREACH(crp2, &crp_q, crp_next) { 15240d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 15250d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the crypto queue (%p).", 15260d5c337bSPawel Jakub Dawidek crp)); 15270d5c337bSPawel Jakub Dawidek } 15280d5c337bSPawel Jakub Dawidek CRYPTO_Q_UNLOCK(); 152939bbca6fSFabien Thomas 153039bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 153139bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 153239bbca6fSFabien Thomas TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { 15330d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 15340d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the return queue (%p).", 15350d5c337bSPawel Jakub Dawidek crp)); 15360d5c337bSPawel Jakub Dawidek } 153739bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 153839bbca6fSFabien Thomas } 15390d5c337bSPawel Jakub Dawidek } 15400d5c337bSPawel Jakub Dawidek #endif 1541946b8f6fSJohn Baldwin } 15420d5c337bSPawel Jakub Dawidek 1543946b8f6fSJohn Baldwin void 1544946b8f6fSJohn Baldwin crypto_freereq(struct cryptop *crp) 1545946b8f6fSJohn Baldwin { 1546946b8f6fSJohn Baldwin if (crp == NULL) 1547946b8f6fSJohn Baldwin return; 1548946b8f6fSJohn Baldwin 1549946b8f6fSJohn Baldwin crypto_destroyreq(crp); 1550091d81d1SSam Leffler uma_zfree(cryptop_zone, crp); 1551091d81d1SSam Leffler } 1552091d81d1SSam Leffler 1553946b8f6fSJohn Baldwin static void 1554946b8f6fSJohn Baldwin _crypto_initreq(struct cryptop *crp, crypto_session_t cses) 1555946b8f6fSJohn Baldwin { 1556946b8f6fSJohn Baldwin crp->crp_session = cses; 1557946b8f6fSJohn Baldwin } 
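/*
 * Illustrative sketch (not part of the framework): one way a consumer
 * might drive the request API above.  It assumes an already-established
 * session 'cses' and the crypto_use_buf() helper from cryptodev.h;
 * example_done() and example_encrypt() are hypothetical names.  A real
 * cipher request would normally also supply an IV (for instance via
 * CRYPTO_F_IV_SEPARATE); that detail is omitted here.  On EAGAIN the
 * session was migrated by crypto_invoke() and the request can simply
 * be resubmitted.
 */
#if 0
static int
example_done(struct cryptop *crp)
{

	if (crp->crp_etype == EAGAIN) {
		/* Clear the completion state and resubmit. */
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		return (crypto_dispatch(crp));
	}

	/* Consume the result (crp_etype holds any error), then release. */
	crypto_freereq(crp);
	return (0);
}

static int
example_encrypt(crypto_session_t cses, void *buf, int len)
{
	struct cryptop *crp;

	crp = crypto_getreq(cses, M_WAITOK);
	crypto_use_buf(crp, buf, len);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_CBIMM;	/* run callback from crypto_done() */
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;
	crp->crp_callback = example_done;
	return (crypto_dispatch(crp));
}
#endif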
1558946b8f6fSJohn Baldwin 1559946b8f6fSJohn Baldwin void 1560946b8f6fSJohn Baldwin crypto_initreq(struct cryptop *crp, crypto_session_t cses) 1561946b8f6fSJohn Baldwin { 1562946b8f6fSJohn Baldwin memset(crp, 0, sizeof(*crp)); 1563946b8f6fSJohn Baldwin _crypto_initreq(crp, cses); 1564946b8f6fSJohn Baldwin } 1565946b8f6fSJohn Baldwin 1566091d81d1SSam Leffler struct cryptop * 1567c0341432SJohn Baldwin crypto_getreq(crypto_session_t cses, int how) 1568091d81d1SSam Leffler { 1569091d81d1SSam Leffler struct cryptop *crp; 1570091d81d1SSam Leffler 1571c0341432SJohn Baldwin MPASS(how == M_WAITOK || how == M_NOWAIT); 1572c0341432SJohn Baldwin crp = uma_zalloc(cryptop_zone, how | M_ZERO); 1573946b8f6fSJohn Baldwin if (crp != NULL) 1574946b8f6fSJohn Baldwin _crypto_initreq(crp, cses); 1575c0341432SJohn Baldwin return (crp); 1576091d81d1SSam Leffler } 1577091d81d1SSam Leffler 1578091d81d1SSam Leffler /* 1579091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 1580091d81d1SSam Leffler */ 1581091d81d1SSam Leffler void 1582091d81d1SSam Leffler crypto_done(struct cryptop *crp) 1583091d81d1SSam Leffler { 15843569ae7fSSam Leffler KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, 15853569ae7fSSam Leffler ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); 15863569ae7fSSam Leffler crp->crp_flags |= CRYPTO_F_DONE; 15877d1853eeSSam Leffler if (crp->crp_etype != 0) 15887290cb47SMark Johnston CRYPTOSTAT_INC(cs_errs); 1589a5c053f5SMark Johnston 1590d8409aafSSam Leffler /* 1591d8409aafSSam Leffler * CBIMM means unconditionally do the callback immediately; 1592d8409aafSSam Leffler * CBIFSYNC means do the callback immediately only if the 1593d8409aafSSam Leffler * operation was done synchronously. Both are used to avoid 1594d8409aafSSam Leffler * doing extraneous context switches; the latter is mostly 1595d8409aafSSam Leffler * used with the software crypto driver. 1596d8409aafSSam Leffler */ 159768f6800cSMark Johnston if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 && 159868f6800cSMark Johnston ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 || 159968f6800cSMark Johnston ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 && 160068f6800cSMark Johnston CRYPTO_SESS_SYNC(crp->crp_session)))) { 1601eb73a605SSam Leffler /* 1602eb73a605SSam Leffler * Do the callback directly. This is ok when the 1603eb73a605SSam Leffler * callback routine does very little (e.g. the 1604eb73a605SSam Leffler * /dev/crypto callback method just does a wakeup). 1605eb73a605SSam Leffler */ 1606eb73a605SSam Leffler crp->crp_callback(crp); 1607eb73a605SSam Leffler } else { 160839bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 160939bbca6fSFabien Thomas bool wake; 161039bbca6fSFabien Thomas 161139bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(crp->crp_retw_id); 161239bbca6fSFabien Thomas 1613eb73a605SSam Leffler /* 1614eb73a605SSam Leffler * Normal case; queue the callback for the thread. 
1615eb73a605SSam Leffler */ 161639bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 161768f6800cSMark Johnston if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) { 161839bbca6fSFabien Thomas struct cryptop *tmp; 161939bbca6fSFabien Thomas 162068f6800cSMark Johnston TAILQ_FOREACH_REVERSE(tmp, 162168f6800cSMark Johnston &ret_worker->crp_ordered_ret_q, cryptop_q, 162268f6800cSMark Johnston crp_next) { 162339bbca6fSFabien Thomas if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { 162468f6800cSMark Johnston TAILQ_INSERT_AFTER( 162568f6800cSMark Johnston &ret_worker->crp_ordered_ret_q, tmp, 162668f6800cSMark Johnston crp, crp_next); 162739bbca6fSFabien Thomas break; 162839bbca6fSFabien Thomas } 162939bbca6fSFabien Thomas } 163039bbca6fSFabien Thomas if (tmp == NULL) { 163168f6800cSMark Johnston TAILQ_INSERT_HEAD( 163268f6800cSMark Johnston &ret_worker->crp_ordered_ret_q, crp, 163368f6800cSMark Johnston crp_next); 163439bbca6fSFabien Thomas } 163539bbca6fSFabien Thomas 163668f6800cSMark Johnston wake = crp->crp_seq == ret_worker->reorder_cur_seq; 163768f6800cSMark Johnston } else { 163868f6800cSMark Johnston wake = TAILQ_EMPTY(&ret_worker->crp_ret_q); 163968f6800cSMark Johnston TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, 164068f6800cSMark Johnston crp_next); 164139bbca6fSFabien Thomas } 164239bbca6fSFabien Thomas 164339bbca6fSFabien Thomas if (wake) 164439bbca6fSFabien Thomas wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 164539bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 1646091d81d1SSam Leffler } 1647eb73a605SSam Leffler } 1648091d81d1SSam Leffler 1649091d81d1SSam Leffler /* 165051e45326SSam Leffler * Terminate a thread at module unload. The process that 165151e45326SSam Leffler * initiated this is waiting for us to signal that we're gone; 165251e45326SSam Leffler * wake it up and exit. We use the driver table lock to insure 165351e45326SSam Leffler * we don't do the wakeup before they're waiting. There is no 165451e45326SSam Leffler * race here because the waiter sleeps on the proc lock for the 165551e45326SSam Leffler * thread so it gets notified at the right time because of an 165651e45326SSam Leffler * extra wakeup that's done in exit1(). 165751e45326SSam Leffler */ 1658091d81d1SSam Leffler static void 165951e45326SSam Leffler crypto_finis(void *chan) 1660091d81d1SSam Leffler { 166151e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 166251e45326SSam Leffler wakeup_one(chan); 166351e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); 16643745c395SJulian Elischer kproc_exit(0); 1665091d81d1SSam Leffler } 1666091d81d1SSam Leffler 1667091d81d1SSam Leffler /* 16681a91ccccSSam Leffler * Crypto thread, dispatches crypto requests. 1669091d81d1SSam Leffler */ 1670091d81d1SSam Leffler static void 1671091d81d1SSam Leffler crypto_proc(void) 1672091d81d1SSam Leffler { 16731a91ccccSSam Leffler struct cryptop *crp, *submit; 1674091d81d1SSam Leffler struct cryptocap *cap; 1675091d81d1SSam Leffler int result, hint; 1676091d81d1SSam Leffler 16776ed982a2SAndrew Turner #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 167804c49e68SKonstantin Belousov fpu_kern_thread(FPU_KERN_NORMAL); 167904c49e68SKonstantin Belousov #endif 168004c49e68SKonstantin Belousov 16811a91ccccSSam Leffler CRYPTO_Q_LOCK(); 1682091d81d1SSam Leffler for (;;) { 1683091d81d1SSam Leffler /* 1684091d81d1SSam Leffler * Find the first element in the queue that can be 1685091d81d1SSam Leffler * processed and look-ahead to see if multiple ops 1686091d81d1SSam Leffler * are ready for the same driver. 
1687091d81d1SSam Leffler 		 */
1688091d81d1SSam Leffler 		submit = NULL;
1689091d81d1SSam Leffler 		hint = 0;
1690091d81d1SSam Leffler 		TAILQ_FOREACH(crp, &crp_q, crp_next) {
1691c0341432SJohn Baldwin 			cap = crp->crp_session->cap;
16924acae0acSPawel Jakub Dawidek 			/*
16934acae0acSPawel Jakub Dawidek 			 * The driver cannot disappear while there is an
16944acae0acSPawel Jakub Dawidek 			 * active session.
16954acae0acSPawel Jakub Dawidek 			 */
1696c3c82036SPawel Jakub Dawidek 			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1697c3c82036SPawel Jakub Dawidek 			    __func__, __LINE__));
1698c0341432SJohn Baldwin 			if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1699091d81d1SSam Leffler 				/* Op needs to be migrated, process it. */
1700091d81d1SSam Leffler 				if (submit == NULL)
1701091d81d1SSam Leffler 					submit = crp;
1702091d81d1SSam Leffler 				break;
1703091d81d1SSam Leffler 			}
1704091d81d1SSam Leffler 			if (!cap->cc_qblocked) {
1705091d81d1SSam Leffler 				if (submit != NULL) {
1706091d81d1SSam Leffler 					/*
1707091d81d1SSam Leffler 					 * We stop on finding another op,
1708091d81d1SSam Leffler 					 * regardless of whether it's for the same
1709091d81d1SSam Leffler 					 * driver or not.  We could keep
1710091d81d1SSam Leffler 					 * searching the queue but it might be
1711091d81d1SSam Leffler 					 * better to just use a per-driver
1712091d81d1SSam Leffler 					 * queue instead.
1713091d81d1SSam Leffler 					 */
1714c0341432SJohn Baldwin 					if (submit->crp_session->cap == cap)
1715091d81d1SSam Leffler 						hint = CRYPTO_HINT_MORE;
1716091d81d1SSam Leffler 				} else {
1717091d81d1SSam Leffler 					submit = crp;
1718091d81d1SSam Leffler 				}
171968f6800cSMark Johnston 				break;
1720091d81d1SSam Leffler 			}
1721091d81d1SSam Leffler 		}
1722091d81d1SSam Leffler 		if (submit != NULL) {
1723091d81d1SSam Leffler 			TAILQ_REMOVE(&crp_q, submit, crp_next);
1724c0341432SJohn Baldwin 			cap = submit->crp_session->cap;
1725c3c82036SPawel Jakub Dawidek 			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1726c3c82036SPawel Jakub Dawidek 			    __func__, __LINE__));
1727c0341432SJohn Baldwin 			CRYPTO_Q_UNLOCK();
17284acae0acSPawel Jakub Dawidek 			result = crypto_invoke(cap, submit, hint);
1729c0341432SJohn Baldwin 			CRYPTO_Q_LOCK();
1730091d81d1SSam Leffler 			if (result == ERESTART) {
1731091d81d1SSam Leffler 				/*
1732091d81d1SSam Leffler 				 * The driver ran out of resources, mark the
1733091d81d1SSam Leffler 				 * driver ``blocked'' for cryptop's and put
1734091d81d1SSam Leffler 				 * the request back in the queue.  It would be
1735091d81d1SSam Leffler 				 * best to put the request back where we got
1736091d81d1SSam Leffler 				 * it but that's hard so for now we put it
1737091d81d1SSam Leffler 				 * at the front.  This should be ok; putting
1738091d81d1SSam Leffler 				 * it at the end does not work.
1739091d81d1SSam Leffler 				 */
1740c0341432SJohn Baldwin 				cap->cc_qblocked = 1;
1741091d81d1SSam Leffler 				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
17427290cb47SMark Johnston 				CRYPTOSTAT_INC(cs_blocks);
1743091d81d1SSam Leffler 			}
174476681661SJohn Baldwin 		} else {
1745091d81d1SSam Leffler 			/*
1746091d81d1SSam Leffler 			 * Nothing more to be processed.  Sleep until we're
1747091d81d1SSam Leffler 			 * woken because there are more ops to process.
1748091d81d1SSam Leffler 			 * This happens either by submission or by a driver
1749091d81d1SSam Leffler 			 * becoming unblocked and notifying us through
1750091d81d1SSam Leffler 			 * crypto_unblock.  Note that when we wakeup we
1751091d81d1SSam Leffler 			 * start processing each queue again from the
1752091d81d1SSam Leffler 			 * front.
It's not clear that it's important to 1753091d81d1SSam Leffler * preserve this ordering since ops may finish 1754091d81d1SSam Leffler * out of order if dispatched to different devices 1755091d81d1SSam Leffler * and some become blocked while others do not. 1756091d81d1SSam Leffler */ 17573a865c82SPawel Jakub Dawidek crp_sleep = 1; 17581a91ccccSSam Leffler msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); 17593a865c82SPawel Jakub Dawidek crp_sleep = 0; 176051e45326SSam Leffler if (cryptoproc == NULL) 176151e45326SSam Leffler break; 17627290cb47SMark Johnston CRYPTOSTAT_INC(cs_intrs); 1763091d81d1SSam Leffler } 1764091d81d1SSam Leffler } 176551e45326SSam Leffler CRYPTO_Q_UNLOCK(); 17661a91ccccSSam Leffler 176751e45326SSam Leffler crypto_finis(&crp_q); 17681a91ccccSSam Leffler } 17691a91ccccSSam Leffler 17701a91ccccSSam Leffler /* 17711a91ccccSSam Leffler * Crypto returns thread, does callbacks for processed crypto requests. 17721a91ccccSSam Leffler * Callbacks are done here, rather than in the crypto drivers, because 17731a91ccccSSam Leffler * callbacks typically are expensive and would slow interrupt handling. 17741a91ccccSSam Leffler */ 17751a91ccccSSam Leffler static void 177639bbca6fSFabien Thomas crypto_ret_proc(struct crypto_ret_worker *ret_worker) 17771a91ccccSSam Leffler { 17781a91ccccSSam Leffler struct cryptop *crpt; 17791a91ccccSSam Leffler 178039bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 17811a91ccccSSam Leffler for (;;) { 17821a91ccccSSam Leffler /* Harvest return q's for completed ops */ 178339bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); 178439bbca6fSFabien Thomas if (crpt != NULL) { 178539bbca6fSFabien Thomas if (crpt->crp_seq == ret_worker->reorder_cur_seq) { 178639bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); 178739bbca6fSFabien Thomas ret_worker->reorder_cur_seq++; 178839bbca6fSFabien Thomas } else { 178939bbca6fSFabien Thomas crpt = NULL; 179039bbca6fSFabien Thomas } 179139bbca6fSFabien Thomas } 17921a91ccccSSam Leffler 179339bbca6fSFabien Thomas if (crpt == NULL) { 179439bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); 179539bbca6fSFabien Thomas if (crpt != NULL) 179639bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); 179739bbca6fSFabien Thomas } 179839bbca6fSFabien Thomas 179976681661SJohn Baldwin if (crpt != NULL) { 180039bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 18011a91ccccSSam Leffler /* 18021a91ccccSSam Leffler * Run callbacks unlocked. 18031a91ccccSSam Leffler */ 1804a5c053f5SMark Johnston if (crpt != NULL) 18051a91ccccSSam Leffler crpt->crp_callback(crpt); 180639bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 18071a91ccccSSam Leffler } else { 18081a91ccccSSam Leffler /* 18091a91ccccSSam Leffler * Nothing more to be processed. Sleep until we're 18101a91ccccSSam Leffler * woken because there are more returns to process. 
18111a91ccccSSam Leffler */ 181239bbca6fSFabien Thomas msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, 18131a91ccccSSam Leffler "crypto_ret_wait", 0); 181439bbca6fSFabien Thomas if (ret_worker->cryptoretproc == NULL) 181551e45326SSam Leffler break; 18167290cb47SMark Johnston CRYPTOSTAT_INC(cs_rets); 18171a91ccccSSam Leffler } 18181a91ccccSSam Leffler } 181939bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 182051e45326SSam Leffler 182139bbca6fSFabien Thomas crypto_finis(&ret_worker->crp_ret_q); 18221a91ccccSSam Leffler } 18236810ad6fSSam Leffler 18246810ad6fSSam Leffler #ifdef DDB 18256810ad6fSSam Leffler static void 18266810ad6fSSam Leffler db_show_drivers(void) 18276810ad6fSSam Leffler { 18286810ad6fSSam Leffler int hid; 18296810ad6fSSam Leffler 183076681661SJohn Baldwin db_printf("%12s %4s %8s %2s\n" 18316810ad6fSSam Leffler , "Device" 18326810ad6fSSam Leffler , "Ses" 18336810ad6fSSam Leffler , "Flags" 18346810ad6fSSam Leffler , "QB" 18356810ad6fSSam Leffler ); 1836c0341432SJohn Baldwin for (hid = 0; hid < crypto_drivers_size; hid++) { 1837c0341432SJohn Baldwin const struct cryptocap *cap = crypto_drivers[hid]; 1838c0341432SJohn Baldwin if (cap == NULL) 18396810ad6fSSam Leffler continue; 184076681661SJohn Baldwin db_printf("%-12s %4u %08x %2u\n" 18416810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 18426810ad6fSSam Leffler , cap->cc_sessions 18436810ad6fSSam Leffler , cap->cc_flags 18446810ad6fSSam Leffler , cap->cc_qblocked 18456810ad6fSSam Leffler ); 18466810ad6fSSam Leffler } 18476810ad6fSSam Leffler } 18486810ad6fSSam Leffler 18496810ad6fSSam Leffler DB_SHOW_COMMAND(crypto, db_show_crypto) 18506810ad6fSSam Leffler { 18516810ad6fSSam Leffler struct cryptop *crp; 185239bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 18536810ad6fSSam Leffler 18546810ad6fSSam Leffler db_show_drivers(); 18556810ad6fSSam Leffler db_printf("\n"); 18566810ad6fSSam Leffler 18576810ad6fSSam Leffler db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", 18586810ad6fSSam Leffler "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", 1859c0341432SJohn Baldwin "Device", "Callback"); 18606810ad6fSSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 18619c0e3d3aSJohn Baldwin db_printf("%4u %08x %4u %4u %04x %8p %8p\n" 1862c0341432SJohn Baldwin , crp->crp_session->cap->cc_hid 18631b0909d5SConrad Meyer , (int) crypto_ses2caps(crp->crp_session) 18649c0e3d3aSJohn Baldwin , crp->crp_olen 18656810ad6fSSam Leffler , crp->crp_etype 18666810ad6fSSam Leffler , crp->crp_flags 1867c0341432SJohn Baldwin , device_get_nameunit(crp->crp_session->cap->cc_dev) 18686810ad6fSSam Leffler , crp->crp_callback 18696810ad6fSSam Leffler ); 18706810ad6fSSam Leffler } 187139bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 187239bbca6fSFabien Thomas db_printf("\n%8s %4s %4s %4s %8s\n", 187339bbca6fSFabien Thomas "ret_worker", "HID", "Etype", "Flags", "Callback"); 187439bbca6fSFabien Thomas if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 187539bbca6fSFabien Thomas TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { 187639bbca6fSFabien Thomas db_printf("%8td %4u %4u %04x %8p\n" 187739bbca6fSFabien Thomas , CRYPTO_RETW_ID(ret_worker) 1878c0341432SJohn Baldwin , crp->crp_session->cap->cc_hid 18796810ad6fSSam Leffler , crp->crp_etype 18806810ad6fSSam Leffler , crp->crp_flags 18816810ad6fSSam Leffler , crp->crp_callback 18826810ad6fSSam Leffler ); 18836810ad6fSSam Leffler } 18846810ad6fSSam Leffler } 18856810ad6fSSam Leffler } 188639bbca6fSFabien Thomas } 18876810ad6fSSam Leffler #endif 18886810ad6fSSam Leffler 
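/*
 * Example (illustrative): with DDB compiled in, the state dumped by
 * db_show_drivers() and db_show_crypto() above can be inspected from
 * the in-kernel debugger prompt:
 *
 *	db> show crypto
 */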
18896810ad6fSSam Leffler int crypto_modevent(module_t mod, int type, void *unused); 18906810ad6fSSam Leffler 18916810ad6fSSam Leffler /* 18926810ad6fSSam Leffler * Initialization code, both for static and dynamic loading. 18936810ad6fSSam Leffler * Note this is not invoked with the usual MODULE_DECLARE 18946810ad6fSSam Leffler * mechanism but instead is listed as a dependency by the 18956810ad6fSSam Leffler * cryptosoft driver. This guarantees proper ordering of 18966810ad6fSSam Leffler * calls on module load/unload. 18976810ad6fSSam Leffler */ 18986810ad6fSSam Leffler int 18996810ad6fSSam Leffler crypto_modevent(module_t mod, int type, void *unused) 19006810ad6fSSam Leffler { 19016810ad6fSSam Leffler int error = EINVAL; 19026810ad6fSSam Leffler 19036810ad6fSSam Leffler switch (type) { 19046810ad6fSSam Leffler case MOD_LOAD: 19056810ad6fSSam Leffler error = crypto_init(); 19066810ad6fSSam Leffler if (error == 0 && bootverbose) 19076810ad6fSSam Leffler printf("crypto: <crypto core>\n"); 19086810ad6fSSam Leffler break; 19096810ad6fSSam Leffler case MOD_UNLOAD: 19106810ad6fSSam Leffler /*XXX disallow if active sessions */ 19116810ad6fSSam Leffler error = 0; 19126810ad6fSSam Leffler crypto_destroy(); 19136810ad6fSSam Leffler return 0; 19146810ad6fSSam Leffler } 19156810ad6fSSam Leffler return error; 19166810ad6fSSam Leffler } 19176810ad6fSSam Leffler MODULE_VERSION(crypto, 1); 19186810ad6fSSam Leffler MODULE_DEPEND(crypto, zlib, 1, 1, 1); 1919
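/*
 * Illustrative sketch (not part of this file): the driver-side half of
 * the queueing protocol implemented by crypto_proc() and crypto_unblock()
 * above.  A driver's CRYPTODEV_PROCESS method returns ERESTART when it is
 * out of resources; crypto_proc() then marks it qblocked and requeues the
 * request.  Once resources free up, the driver completes outstanding
 * requests with crypto_done() and calls crypto_unblock() to restart
 * dispatch.  The example_softc fields and example_start() helper are
 * hypothetical.
 */
#if 0
static int
example_process(device_t dev, struct cryptop *crp, int hint)
{
	struct example_softc *sc = device_get_softc(dev);

	mtx_lock(&sc->sc_lock);
	if (sc->sc_nfree == 0) {
		/* No free descriptors; crypto_proc() will retry later. */
		sc->sc_blocked = true;
		mtx_unlock(&sc->sc_lock);
		return (ERESTART);
	}
	sc->sc_nfree--;
	example_start(sc, crp);		/* hypothetical hardware submit */
	mtx_unlock(&sc->sc_lock);
	return (0);
}

static void
example_harvest(struct example_softc *sc, struct cryptop *crp, int error)
{
	bool unblock;

	mtx_lock(&sc->sc_lock);
	sc->sc_nfree++;
	unblock = sc->sc_blocked;
	sc->sc_blocked = false;
	mtx_unlock(&sc->sc_lock);

	crp->crp_etype = error;
	crypto_done(crp);
	if (unblock)
		crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
}
#endif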