/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the Openbsd Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.
 * IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#define CRYPTO_TIMING			/* enable timing support */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <ddb/ddb.h>

#include <vm/uma.h>
#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each asym algorithm they support with crypto_kregister().
 */
static struct mtx crypto_drivers_mtx;	/* lock on driver table */
#define CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Not tagged fields are read-only.
 */
struct cryptocap {
    device_t    cc_dev;
    uint32_t    cc_hid;
    u_int32_t   cc_sessions;        /* (d) # of sessions */
    u_int32_t   cc_koperations;     /* (d) # of asym operations */
    u_int8_t    cc_kalg[CRK_ALGORITHM_MAX + 1];

    int         cc_flags;           /* (d) flags */
#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
    int         cc_qblocked;        /* (q) symmetric q blocked */
    int         cc_kqblocked;       /* (q) asymmetric q blocked */
    size_t      cc_session_size;
    volatile int cc_refs;
};

static struct cryptocap **crypto_drivers = NULL;
static int crypto_drivers_size = 0;

struct crypto_session {
    struct cryptocap *cap;
    void *softc;
    struct crypto_session_params csp;
};

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static int crp_sleep = 0;
static TAILQ_HEAD(cryptop_q, cryptop) crp_q;	/* request queues */
static TAILQ_HEAD(,cryptkop) crp_kq;
static struct mtx crypto_q_mtx;
#define CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

static SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag.
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic.
 */
#define CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)

struct crypto_ret_worker {
    struct mtx crypto_ret_mtx;

    TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
    TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */
    TAILQ_HEAD(,cryptkop) crp_ret_kq;		/* callback queue for asym jobs */

    u_int32_t reorder_ops;	/* total ordered sym jobs received */
    u_int32_t reorder_cur_seq;	/* current sym job dispatched */

    struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
    for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_EMPTY(w) \
    (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif

static uma_zone_t cryptop_zone;
static uma_zone_t cryptoses_zone;

int crypto_userasymcrypto = 1;
SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
    &crypto_userasymcrypto, 0,
    "Enable user-mode access to asymmetric crypto support");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
    &crypto_userasymcrypto, 0,
    "Enable/disable user-mode access to asymmetric crypto support");
#endif

int crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
    &crypto_devallowsoft, 0,
    "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
    &crypto_devallowsoft, 0,
    "Enable/disable use of software crypto by /dev/crypto");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static void crypto_proc(void);
static struct proc *cryptoproc;
static void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static void crypto_destroy(void);
static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp);
static void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);
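/*
 * Illustrative sketch (not part of the original file): the CRYPTO_SEQ_GT()
 * macro defined above compares return-worker sequence numbers with
 * serial-number (modular) arithmetic, so the ordering stays correct when
 * the 32-bit counters wrap.  For two sequence numbers taken just after and
 * just before a wrap:
 *
 *	uint32_t newer = 0x00000002;	(counter has wrapped)
 *	uint32_t older = 0xfffffffe;
 *
 *	CRYPTO_SEQ_GT(newer, older) expands to
 *	((int)(0x00000002 - 0xfffffffe) > 0) == ((int)4 > 0) == true,
 *
 * whereas a plain "newer > older" comparison would be false.
 */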
static struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, &cryptostats,
    cryptostats, "Crypto system statistics");

#ifdef CRYPTO_TIMING
static int crypto_timing = 0;
SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
    &crypto_timing, 0, "Enable/disable crypto timing support");
#endif

/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
    .kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
    caddr_t kmdp;

    kmdp = preload_search_by_type("elf kernel");

    if (kmdp == NULL)
        kmdp = preload_search_by_type("elf64 kernel");

    keybuf = (struct keybuf *)preload_search_info(kmdp,
        MODINFO_METADATA | MODINFOMD_KEYBUF);

    if (keybuf == NULL)
        keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{

    return (keybuf);
}

static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

    refcount_acquire(&cap->cc_refs);
    return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

    if (refcount_release(&cap->cc_refs) == 0)
        return;

    KASSERT(cap->cc_sessions == 0,
        ("freeing crypto driver with active sessions"));
    KASSERT(cap->cc_koperations == 0,
        ("freeing crypto driver with active key operations"));

    free(cap, M_CRYPTO_DATA);
}

static int
crypto_init(void)
{
    struct crypto_ret_worker *ret_worker;
    int error;

    mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
        MTX_DEF|MTX_QUIET);

    TAILQ_INIT(&crp_q);
    TAILQ_INIT(&crp_kq);
    mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

    cryptop_zone = uma_zcreate("cryptop", sizeof(struct cryptop),
        0, 0, 0, 0,
        UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
    cryptoses_zone = uma_zcreate("crypto_session",
        sizeof(struct crypto_session), NULL, NULL, NULL, NULL,
        UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

    if (cryptop_zone == NULL || cryptoses_zone == NULL) {
        printf("crypto_init: cannot setup crypto zones\n");
        error = ENOMEM;
        goto bad;
    }

    crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
    crypto_drivers = malloc(crypto_drivers_size *
        sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
    if (crypto_drivers == NULL) {
        printf("crypto_init: cannot setup crypto drivers\n");
        error = ENOMEM;
        goto bad;
    }

    if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
        crypto_workers_num = mp_ncpus;

    crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO,
        taskqueue_thread_enqueue, &crypto_tq);
    if (crypto_tq == NULL) {
        printf("crypto init: cannot setup crypto taskqueue\n");
        error = ENOMEM;
        goto bad;
    }

    taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
        "crypto");

    error = kproc_create((void (*)(void *)) crypto_proc, NULL,
        &cryptoproc, 0, 0, "crypto");
    if (error) {
        printf("crypto_init: cannot start crypto thread; error %d",
            error);
        goto bad;
    }

    crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker),
        M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
    if (crypto_ret_workers == NULL) {
        error = ENOMEM;
        printf("crypto_init: cannot allocate ret workers\n");
        goto bad;
    }

    FOREACH_CRYPTO_RETW(ret_worker) {
        TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
        TAILQ_INIT(&ret_worker->crp_ret_q);
        TAILQ_INIT(&ret_worker->crp_ret_kq);

        ret_worker->reorder_ops = 0;
        ret_worker->reorder_cur_seq = 0;

        mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF);

        error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker,
            &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
        if (error) {
            printf("crypto_init: cannot start cryptoret thread; error %d",
%d", 37951e45326SSam Leffler error); 38051e45326SSam Leffler goto bad; 38151e45326SSam Leffler } 38239bbca6fSFabien Thomas } 383ec5c0e5bSAllan Jude 384ec5c0e5bSAllan Jude keybuf_init(); 385ec5c0e5bSAllan Jude 38651e45326SSam Leffler return 0; 38751e45326SSam Leffler bad: 38851e45326SSam Leffler crypto_destroy(); 38951e45326SSam Leffler return error; 39051e45326SSam Leffler } 39151e45326SSam Leffler 39251e45326SSam Leffler /* 39351e45326SSam Leffler * Signal a crypto thread to terminate. We use the driver 39451e45326SSam Leffler * table lock to synchronize the sleep/wakeups so that we 39551e45326SSam Leffler * are sure the threads have terminated before we release 39651e45326SSam Leffler * the data structures they use. See crypto_finis below 39751e45326SSam Leffler * for the other half of this song-and-dance. 39851e45326SSam Leffler */ 39951e45326SSam Leffler static void 40051e45326SSam Leffler crypto_terminate(struct proc **pp, void *q) 40151e45326SSam Leffler { 40251e45326SSam Leffler struct proc *p; 40351e45326SSam Leffler 40451e45326SSam Leffler mtx_assert(&crypto_drivers_mtx, MA_OWNED); 40551e45326SSam Leffler p = *pp; 40651e45326SSam Leffler *pp = NULL; 40751e45326SSam Leffler if (p) { 40851e45326SSam Leffler wakeup_one(q); 40951e45326SSam Leffler PROC_LOCK(p); /* NB: insure we don't miss wakeup */ 41051e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ 41151e45326SSam Leffler msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); 41251e45326SSam Leffler PROC_UNLOCK(p); 41351e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 41451e45326SSam Leffler } 41551e45326SSam Leffler } 41651e45326SSam Leffler 41751e45326SSam Leffler static void 418c0341432SJohn Baldwin hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx, 419c0341432SJohn Baldwin uint8_t padval) 420c0341432SJohn Baldwin { 421c0341432SJohn Baldwin uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; 422c0341432SJohn Baldwin u_int i; 423c0341432SJohn Baldwin 424c0341432SJohn Baldwin KASSERT(axf->blocksize <= sizeof(hmac_key), 425c0341432SJohn Baldwin ("Invalid HMAC block size %d", axf->blocksize)); 426c0341432SJohn Baldwin 427c0341432SJohn Baldwin /* 428c0341432SJohn Baldwin * If the key is larger than the block size, use the digest of 429c0341432SJohn Baldwin * the key as the key instead. 
     */
    memset(hmac_key, 0, sizeof(hmac_key));
    if (klen > axf->blocksize) {
        axf->Init(auth_ctx);
        axf->Update(auth_ctx, key, klen);
        axf->Final(hmac_key, auth_ctx);
        klen = axf->hashsize;
    } else
        memcpy(hmac_key, key, klen);

    for (i = 0; i < axf->blocksize; i++)
        hmac_key[i] ^= padval;

    axf->Init(auth_ctx);
    axf->Update(auth_ctx, hmac_key, axf->blocksize);
}

void
hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

    hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

    hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}
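/*
 * Example usage sketch (not part of the original file): drivers typically
 * call hmac_init_ipad()/hmac_init_opad() once at session setup to
 * precompute the two padded contexts and then clone them per request.
 * A minimal software HMAC computation along those lines, assuming
 * axf->ctxsize describes the opaque context size for struct auth_hash as
 * declared in xform_auth.h, might look like:
 *
 *	static void
 *	example_hmac(struct auth_hash *axf, const char *key, int klen,
 *	    const void *data, size_t len, uint8_t *tag)
 *	{
 *		void *ictx, *octx;
 *
 *		ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_WAITOK);
 *		octx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_WAITOK);
 *		hmac_init_ipad(axf, key, klen, ictx);
 *		hmac_init_opad(axf, key, klen, octx);
 *
 *		axf->Update(ictx, data, len);
 *		axf->Final(tag, ictx);			(inner hash)
 *		axf->Update(octx, tag, axf->hashsize);
 *		axf->Final(tag, octx);			(outer hash = HMAC tag)
 *
 *		free(ictx, M_CRYPTO_DATA);
 *		free(octx, M_CRYPTO_DATA);
 *	}
 */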
static void
crypto_destroy(void)
{
    struct crypto_ret_worker *ret_worker;
    int i;

    /*
     * Terminate any crypto threads.
     */
    if (crypto_tq != NULL)
        taskqueue_drain_all(crypto_tq);
    CRYPTO_DRIVER_LOCK();
    crypto_terminate(&cryptoproc, &crp_q);
    FOREACH_CRYPTO_RETW(ret_worker)
        crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q);
    CRYPTO_DRIVER_UNLOCK();

    /* XXX flush queues??? */

    /*
     * Reclaim dynamically allocated resources.
     */
    for (i = 0; i < crypto_drivers_size; i++) {
        if (crypto_drivers[i] != NULL)
            cap_rele(crypto_drivers[i]);
    }
    free(crypto_drivers, M_CRYPTO_DATA);

    if (cryptoses_zone != NULL)
        uma_zdestroy(cryptoses_zone);
    if (cryptop_zone != NULL)
        uma_zdestroy(cryptop_zone);
    mtx_destroy(&crypto_q_mtx);
    FOREACH_CRYPTO_RETW(ret_worker)
        mtx_destroy(&ret_worker->crypto_ret_mtx);
    free(crypto_ret_workers, M_CRYPTO_DATA);
    if (crypto_tq != NULL)
        taskqueue_free(crypto_tq);
    mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
    return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
    return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
    return (crypto_session->softc);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
    return (&crypto_session->csp);
}

struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

    switch (csp->csp_auth_alg) {
    case CRYPTO_MD5_HMAC:
        return (&auth_hash_hmac_md5);
    case CRYPTO_SHA1_HMAC:
        return (&auth_hash_hmac_sha1);
    case CRYPTO_SHA2_224_HMAC:
        return (&auth_hash_hmac_sha2_224);
    case CRYPTO_SHA2_256_HMAC:
        return (&auth_hash_hmac_sha2_256);
    case CRYPTO_SHA2_384_HMAC:
        return (&auth_hash_hmac_sha2_384);
    case CRYPTO_SHA2_512_HMAC:
        return (&auth_hash_hmac_sha2_512);
    case CRYPTO_NULL_HMAC:
        return (&auth_hash_null);
    case CRYPTO_RIPEMD160_HMAC:
        return (&auth_hash_hmac_ripemd_160);
    case CRYPTO_MD5_KPDK:
        return (&auth_hash_key_md5);
    case CRYPTO_SHA1_KPDK:
        return (&auth_hash_key_sha1);
#ifdef notyet
    case CRYPTO_MD5:
        return (&auth_hash_md5);
#endif
    case CRYPTO_SHA1:
        return (&auth_hash_sha1);
    case CRYPTO_SHA2_224:
        return (&auth_hash_sha2_224);
    case CRYPTO_SHA2_256:
        return (&auth_hash_sha2_256);
    case CRYPTO_SHA2_384:
        return (&auth_hash_sha2_384);
    case CRYPTO_SHA2_512:
        return (&auth_hash_sha2_512);
    case CRYPTO_AES_NIST_GMAC:
        switch (csp->csp_auth_klen) {
        case 128 / 8:
            return (&auth_hash_nist_gmac_aes_128);
        case 192 / 8:
            return (&auth_hash_nist_gmac_aes_192);
        case 256 / 8:
            return (&auth_hash_nist_gmac_aes_256);
        default:
            return (NULL);
        }
    case CRYPTO_BLAKE2B:
        return (&auth_hash_blake2b);
    case CRYPTO_BLAKE2S:
        return (&auth_hash_blake2s);
    case CRYPTO_POLY1305:
        return (&auth_hash_poly1305);
    case CRYPTO_AES_CCM_CBC_MAC:
        switch (csp->csp_auth_klen) {
        case 128 / 8:
            return (&auth_hash_ccm_cbc_mac_128);
        case 192 / 8:
            return (&auth_hash_ccm_cbc_mac_192);
        case 256 / 8:
            return (&auth_hash_ccm_cbc_mac_256);
        default:
            return (NULL);
        }
    default:
        return (NULL);
    }
}

struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

    switch (csp->csp_cipher_alg) {
    case CRYPTO_DES_CBC:
        return (&enc_xform_des);
    case CRYPTO_3DES_CBC:
        return (&enc_xform_3des);
    case CRYPTO_BLF_CBC:
        return (&enc_xform_blf);
    case CRYPTO_CAST_CBC:
        return (&enc_xform_cast5);
    case CRYPTO_SKIPJACK_CBC:
        return (&enc_xform_skipjack);
    case CRYPTO_RIJNDAEL128_CBC:
        return (&enc_xform_rijndael128);
    case CRYPTO_AES_XTS:
        return (&enc_xform_aes_xts);
    case CRYPTO_AES_ICM:
        return (&enc_xform_aes_icm);
    case CRYPTO_AES_NIST_GCM_16:
        return (&enc_xform_aes_nist_gcm);
    case CRYPTO_CAMELLIA_CBC:
        return (&enc_xform_camellia);
    case CRYPTO_NULL_CBC:
        return (&enc_xform_null);
    case CRYPTO_CHACHA20:
        return (&enc_xform_chacha20);
    case CRYPTO_AES_CCM_16:
        return (&enc_xform_ccm);
    default:
        return (NULL);
    }
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{

    return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
    struct cryptocap *cap, *best;
    int best_match, error, hid;

    CRYPTO_DRIVER_ASSERT();

    best = NULL;
    for (hid = 0; hid < crypto_drivers_size; hid++) {
        /*
         * If there is no driver for this slot, or the driver
         * is not appropriate (hardware or software based on
         * match), then skip.
         */
        cap = crypto_drivers[hid];
        if (cap == NULL ||
            (cap->cc_flags & flags) == 0)
            continue;

        error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
        if (error >= 0)
            continue;

        /*
         * Use the driver with the highest probe value.
         * Hardware drivers use a higher probe value than
         * software.  In case of a tie, prefer the driver with
         * the fewest active sessions.
         */
        if (best == NULL || error > best_match ||
            (error == best_match &&
            cap->cc_sessions < best->cc_sessions)) {
            best = cap;
            best_match = error;
        }
    }
    return best;
}

static bool
alg_is_compression(int alg)
{

    if (alg == CRYPTO_DEFLATE_COMP)
        return (true);
    return (false);
}

static bool
alg_is_cipher(int alg)
{

    if (alg >= CRYPTO_DES_CBC && alg <= CRYPTO_SKIPJACK_CBC)
        return (true);
    if (alg >= CRYPTO_AES_CBC && alg <= CRYPTO_ARC4)
        return (true);
    if (alg == CRYPTO_NULL_CBC)
        return (true);
    if (alg >= CRYPTO_CAMELLIA_CBC && alg <= CRYPTO_AES_ICM)
        return (true);
    if (alg == CRYPTO_CHACHA20)
        return (true);
    return (false);
}

static bool
alg_is_digest(int alg)
{

    if (alg >= CRYPTO_MD5_HMAC && alg <= CRYPTO_SHA1_KPDK)
        return (true);
    if (alg >= CRYPTO_MD5 && alg <= CRYPTO_SHA1)
        return (true);
    if (alg == CRYPTO_NULL_HMAC)
        return (true);
    if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC)
        return (true);
    if (alg == CRYPTO_AES_NIST_GMAC)
        return (true);
    if (alg >= CRYPTO_BLAKE2B && alg <= CRYPTO_BLAKE2S)
        return (true);
    if (alg >= CRYPTO_SHA2_224_HMAC && alg <= CRYPTO_POLY1305)
        return (true);
    if (alg == CRYPTO_AES_CCM_CBC_MAC)
        return (true);
    return (false);
}

static bool
alg_is_keyed_digest(int alg)
{

    if (alg >= CRYPTO_MD5_HMAC && alg <= CRYPTO_SHA1_KPDK)
        return (true);
    if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC)
        return (true);
    if (alg == CRYPTO_AES_NIST_GMAC)
        return (true);
    if (alg >= CRYPTO_BLAKE2B && alg <= CRYPTO_BLAKE2S)
        return (true);
    if (alg == CRYPTO_SHA2_224_HMAC)
        return (true);
    if (alg == CRYPTO_POLY1305)
        return (true);
    if (alg == CRYPTO_AES_CCM_CBC_MAC)
        return (true);
    return (false);
}

static bool
alg_is_aead(int alg)
{

    if (alg == CRYPTO_AES_NIST_GCM_16)
        return (true);
    if (alg == CRYPTO_AES_CCM_16)
        return (true);
    return (false);
}

/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
    struct auth_hash *axf;

    /* Mode-independent checks. */
    if (csp->csp_flags != 0)
        return (false);
    if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
        csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
        return (false);
    if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
        return (false);
    if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
        return (false);

    switch (csp->csp_mode) {
    case CSP_MODE_COMPRESS:
        if (!alg_is_compression(csp->csp_cipher_alg))
            return (false);
        if (csp->csp_flags != 0)
            return (false);
        if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
            csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
            csp->csp_auth_mlen != 0)
            return (false);
        break;
    case CSP_MODE_CIPHER:
        if (!alg_is_cipher(csp->csp_cipher_alg))
            return (false);
        if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
            if (csp->csp_cipher_klen == 0)
                return (false);
            if (csp->csp_cipher_alg != CRYPTO_ARC4) {
                if (csp->csp_ivlen == 0)
                    return (false);
            }
        }
        if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
            return (false);
        if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
            csp->csp_auth_mlen != 0)
            return (false);
        break;
    case CSP_MODE_DIGEST:
        if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
            return (false);

        /* IV is optional for digests (e.g. GMAC). */
        if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
            return (false);
        if (!alg_is_digest(csp->csp_auth_alg))
            return (false);

        /* Key is optional for BLAKE2 digests. */
        if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
            csp->csp_auth_alg == CRYPTO_BLAKE2S)
            ;
        else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
            if (csp->csp_auth_klen == 0)
                return (false);
        } else {
            if (csp->csp_auth_klen != 0)
                return (false);
        }
        if (csp->csp_auth_mlen != 0) {
            axf = crypto_auth_hash(csp);
            if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
                return (false);
        }
        break;
    case CSP_MODE_AEAD:
        if (!alg_is_aead(csp->csp_cipher_alg))
            return (false);
        if (csp->csp_cipher_klen == 0)
            return (false);
        if (csp->csp_ivlen == 0 ||
            csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
            return (false);
        if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
            return (false);

        /*
         * XXX: Would be nice to have a better way to get this
         * value.
         */
        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_CCM_16:
            if (csp->csp_auth_mlen > 16)
                return (false);
            break;
        }
        break;
    case CSP_MODE_ETA:
        if (!alg_is_cipher(csp->csp_cipher_alg))
            return (false);
        if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
            if (csp->csp_cipher_klen == 0)
                return (false);
            if (csp->csp_cipher_alg != CRYPTO_ARC4) {
                if (csp->csp_ivlen == 0)
                    return (false);
            }
        }
        if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
            return (false);
        if (!alg_is_digest(csp->csp_auth_alg))
            return (false);

        /* Key is optional for BLAKE2 digests. */
        if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
            csp->csp_auth_alg == CRYPTO_BLAKE2S)
            ;
        else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
            if (csp->csp_auth_klen == 0)
                return (false);
        } else {
            if (csp->csp_auth_klen != 0)
                return (false);
        }
        if (csp->csp_auth_mlen != 0) {
            axf = crypto_auth_hash(csp);
            if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
                return (false);
        }
        break;
    default:
        return (false);
    }

    return (true);
}

/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
    struct cryptocap *cap;

    cap = cses->cap;

    explicit_bzero(cses->softc, cap->cc_session_size);
    free(cses->softc, M_CRYPTO_DATA);
    uma_zfree(cryptoses_zone, cses);

    CRYPTO_DRIVER_LOCK();
    cap->cc_sessions--;
    if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
        wakeup(cap);
    CRYPTO_DRIVER_UNLOCK();
    cap_rele(cap);
}

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
    crypto_session_t res;
    struct cryptocap *cap;
    int err;

    if (!check_csp(csp))
        return (EINVAL);

    res = NULL;

    CRYPTO_DRIVER_LOCK();
    if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
        /*
         * Use specified driver; verify it is capable.
         */
        cap = crypto_checkdriver(crid);
        if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
            cap = NULL;
    } else {
        /*
         * No requested driver; select based on crid flags.
         */
        cap = crypto_select_driver(csp, crid);
    }
    if (cap == NULL) {
        CRYPTO_DRIVER_UNLOCK();
        CRYPTDEB("no driver");
        return (EOPNOTSUPP);
    }
    cap_ref(cap);
    cap->cc_sessions++;
    CRYPTO_DRIVER_UNLOCK();

    res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO);
    res->cap = cap;
    res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK |
        M_ZERO);
    res->csp = *csp;

    /* Call the driver initialization routine. */
    err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
    if (err != 0) {
        CRYPTDEB("dev newsession failed: %d", err);
        crypto_deletesession(res);
        return (err);
    }

    *cses = res;
    return (0);
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
    struct cryptocap *cap;

    if (cses == NULL)
        return;

    cap = cses->cap;

    /* Call the driver cleanup routine, if available. */
    CRYPTODEV_FREESESSION(cap->cc_dev, cses);

    crypto_deletesession(cses);
}
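/*
 * Example usage sketch (not part of the original file): a kernel consumer
 * creates a session by filling in a struct crypto_session_params and
 * handing it to crypto_newsession() along with driver-selection
 * constraints.  The key pointer below is a caller-supplied placeholder.
 *
 *	static int
 *	example_open_gcm_session(crypto_session_t *sidp, const void *key)
 *	{
 *		struct crypto_session_params csp;
 *
 *		memset(&csp, 0, sizeof(csp));
 *		csp.csp_mode = CSP_MODE_AEAD;
 *		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
 *		csp.csp_cipher_key = key;
 *		csp.csp_cipher_klen = 32;	(AES-256 key)
 *		csp.csp_ivlen = 12;		(96-bit GCM nonce)
 *		csp.csp_auth_mlen = 16;		(full GCM tag)
 *
 *		Accept either a hardware or a software driver:
 *		return (crypto_newsession(sidp, &csp,
 *		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE));
 *	}
 *
 * The matching teardown is crypto_freesession(*sidp) when the session is
 * no longer needed.
 */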
/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
    struct cryptocap *cap, **newdrv;
    int i;

    if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
        device_printf(dev,
            "no flags specified when registering driver\n");
        return -1;
    }

    cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
    cap->cc_dev = dev;
    cap->cc_session_size = sessionsize;
    cap->cc_flags = flags;
    refcount_init(&cap->cc_refs, 1);

    CRYPTO_DRIVER_LOCK();
    for (;;) {
        for (i = 0; i < crypto_drivers_size; i++) {
            if (crypto_drivers[i] == NULL)
                break;
        }

        if (i < crypto_drivers_size)
            break;

        /* Out of entries, allocate some more. */

        if (2 * crypto_drivers_size <= crypto_drivers_size) {
            CRYPTO_DRIVER_UNLOCK();
            printf("crypto: driver count wraparound!\n");
            cap_rele(cap);
            return (-1);
        }
        CRYPTO_DRIVER_UNLOCK();

        newdrv = malloc(2 * crypto_drivers_size *
            sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

        CRYPTO_DRIVER_LOCK();
        memcpy(newdrv, crypto_drivers,
            crypto_drivers_size * sizeof(*crypto_drivers));

        crypto_drivers_size *= 2;

        free(crypto_drivers, M_CRYPTO_DATA);
        crypto_drivers = newdrv;
    }

    cap->cc_hid = i;
    crypto_drivers[i] = cap;
    CRYPTO_DRIVER_UNLOCK();

    if (bootverbose)
        printf("crypto: assign %s driver id %u, flags 0x%x\n",
            device_get_nameunit(dev), i, flags);

    return i;
}
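/*
 * Example usage sketch (not part of the original file): a crypto driver
 * typically obtains its driver id from its device attach routine and
 * releases it on detach.  "example_softc"/"example_session"/"sc_cid" are
 * hypothetical driver structures; the CRYPTODEV_* methods themselves are
 * supplied through the kobj interface declared in cryptodev_if.h.
 *
 *	static int
 *	example_attach(device_t dev)
 *	{
 *		struct example_softc *sc = device_get_softc(dev);
 *
 *		sc->sc_cid = crypto_get_driverid(dev,
 *		    sizeof(struct example_session), CRYPTOCAP_F_SOFTWARE);
 *		if (sc->sc_cid < 0)
 *			return (ENXIO);
 *		return (0);
 *	}
 *
 *	static int
 *	example_detach(device_t dev)
 *	{
 *		struct example_softc *sc = device_get_softc(dev);
 *
 *		crypto_unregister_all(sc->sc_cid);
 *		return (0);
 *	}
 */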
/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
    struct cryptocap *cap;
    int i, len = strlen(match);

    CRYPTO_DRIVER_LOCK();
    for (i = 0; i < crypto_drivers_size; i++) {
        if (crypto_drivers[i] == NULL)
            continue;
        cap = crypto_drivers[i];
        if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
            strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
            CRYPTO_DRIVER_UNLOCK();
            return (i);
        }
    }
    CRYPTO_DRIVER_UNLOCK();
    return (-1);
}

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
    struct cryptocap *cap;
    device_t dev;

    dev = NULL;
    CRYPTO_DRIVER_LOCK();
    cap = crypto_checkdriver(hid);
    if (cap != NULL)
        dev = cap->cc_dev;
    CRYPTO_DRIVER_UNLOCK();
    return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
    struct cryptocap *cap;
    int flags;

    flags = 0;
    CRYPTO_DRIVER_LOCK();
    cap = crypto_checkdriver(hid);
    if (cap != NULL)
        flags = cap->cc_flags;
    CRYPTO_DRIVER_UNLOCK();
    return (flags);
}
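/*
 * Example usage sketch (not part of the original file): the lookup helpers
 * above can be used to pin sessions to a particular provider.  The driver
 * name "example0" is hypothetical.
 *
 *	int crid;
 *
 *	crid = crypto_find_driver("example0");	(or just "example")
 *	if (crid >= 0 &&
 *	    (crypto_getcaps(crid) & CRYPTOCAP_F_HARDWARE) != 0) {
 *		The returned id can be passed as the crid argument of
 *		crypto_newsession() to request that specific driver.
 *	}
 */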
1154091d81d1SSam Leffler */ 1155091d81d1SSam Leffler 1156091d81d1SSam Leffler cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; 1157091d81d1SSam Leffler if (bootverbose) 11586810ad6fSSam Leffler printf("crypto: %s registers key alg %u flags %u\n" 11596810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 1160091d81d1SSam Leffler , kalg 1161091d81d1SSam Leffler , flags 1162091d81d1SSam Leffler ); 1163091d81d1SSam Leffler err = 0; 1164091d81d1SSam Leffler } else 1165091d81d1SSam Leffler err = EINVAL; 1166091d81d1SSam Leffler 1167091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1168091d81d1SSam Leffler return err; 1169091d81d1SSam Leffler } 1170091d81d1SSam Leffler 1171091d81d1SSam Leffler /* 1172091d81d1SSam Leffler * Unregister all algorithms associated with a crypto driver. 1173091d81d1SSam Leffler * If there are pending sessions using it, leave enough information 1174091d81d1SSam Leffler * around so that subsequent calls using those sessions will 1175091d81d1SSam Leffler * correctly detect the driver has been unregistered and reroute 1176091d81d1SSam Leffler * requests. 1177091d81d1SSam Leffler */ 1178091d81d1SSam Leffler int 1179091d81d1SSam Leffler crypto_unregister_all(u_int32_t driverid) 1180091d81d1SSam Leffler { 1181091d81d1SSam Leffler struct cryptocap *cap; 1182091d81d1SSam Leffler 1183091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 1184091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 1185c0341432SJohn Baldwin if (cap == NULL) { 1186091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1187c0341432SJohn Baldwin return (EINVAL); 1188c0341432SJohn Baldwin } 11896810ad6fSSam Leffler 1190c0341432SJohn Baldwin cap->cc_flags |= CRYPTOCAP_F_CLEANUP; 1191c0341432SJohn Baldwin crypto_drivers[driverid] = NULL; 1192c0341432SJohn Baldwin 1193c0341432SJohn Baldwin /* 1194c0341432SJohn Baldwin * XXX: This doesn't do anything to kick sessions that 1195c0341432SJohn Baldwin * have no pending operations. 1196c0341432SJohn Baldwin */ 1197c0341432SJohn Baldwin while (cap->cc_sessions != 0 || cap->cc_koperations != 0) 1198c0341432SJohn Baldwin mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); 1199c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 1200c0341432SJohn Baldwin cap_rele(cap); 1201c0341432SJohn Baldwin 1202c0341432SJohn Baldwin return (0); 1203091d81d1SSam Leffler } 1204091d81d1SSam Leffler 1205091d81d1SSam Leffler /* 1206091d81d1SSam Leffler * Clear blockage on a driver. The what parameter indicates whether 1207091d81d1SSam Leffler * the driver is now ready for cryptop's and/or cryptokop's. 
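 *
 * A minimal sketch of the intended use (sc and sc_cid are hypothetical
 * driver softc names): a driver whose process method returned ERESTART
 * would, once resources are available again, typically call
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *
 * from its completion path so the dispatch thread resumes feeding it.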
1208091d81d1SSam Leffler */ 1209091d81d1SSam Leffler int 1210091d81d1SSam Leffler crypto_unblock(u_int32_t driverid, int what) 1211091d81d1SSam Leffler { 1212091d81d1SSam Leffler struct cryptocap *cap; 12133a865c82SPawel Jakub Dawidek int err; 1214091d81d1SSam Leffler 1215091d81d1SSam Leffler CRYPTO_Q_LOCK(); 1216091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 1217091d81d1SSam Leffler if (cap != NULL) { 12183a865c82SPawel Jakub Dawidek if (what & CRYPTO_SYMQ) 1219091d81d1SSam Leffler cap->cc_qblocked = 0; 12203a865c82SPawel Jakub Dawidek if (what & CRYPTO_ASYMQ) 1221091d81d1SSam Leffler cap->cc_kqblocked = 0; 12223a865c82SPawel Jakub Dawidek if (crp_sleep) 12231a91ccccSSam Leffler wakeup_one(&crp_q); 1224091d81d1SSam Leffler err = 0; 1225091d81d1SSam Leffler } else 1226091d81d1SSam Leffler err = EINVAL; 1227091d81d1SSam Leffler CRYPTO_Q_UNLOCK(); 1228091d81d1SSam Leffler 1229091d81d1SSam Leffler return err; 1230091d81d1SSam Leffler } 1231091d81d1SSam Leffler 1232c0341432SJohn Baldwin #ifdef INVARIANTS 1233c0341432SJohn Baldwin /* Various sanity checks on crypto requests. */ 1234c0341432SJohn Baldwin static void 1235c0341432SJohn Baldwin crp_sanity(struct cryptop *crp) 1236c0341432SJohn Baldwin { 1237c0341432SJohn Baldwin struct crypto_session_params *csp; 1238c0341432SJohn Baldwin 1239c0341432SJohn Baldwin KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); 1240c0341432SJohn Baldwin KASSERT(crp->crp_ilen >= 0, ("incoming crp with -ve input length")); 1241c0341432SJohn Baldwin KASSERT(crp->crp_etype == 0, ("incoming crp with error")); 1242c0341432SJohn Baldwin KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), 1243c0341432SJohn Baldwin ("incoming crp already done")); 1244c0341432SJohn Baldwin 1245c0341432SJohn Baldwin csp = &crp->crp_session->csp; 1246c0341432SJohn Baldwin switch (csp->csp_mode) { 1247c0341432SJohn Baldwin case CSP_MODE_COMPRESS: 1248c0341432SJohn Baldwin KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || 1249c0341432SJohn Baldwin crp->crp_op == CRYPTO_OP_DECOMPRESS, 1250c0341432SJohn Baldwin ("invalid compression op %x", crp->crp_op)); 1251c0341432SJohn Baldwin break; 1252c0341432SJohn Baldwin case CSP_MODE_CIPHER: 1253c0341432SJohn Baldwin KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || 1254c0341432SJohn Baldwin crp->crp_op == CRYPTO_OP_DECRYPT, 1255c0341432SJohn Baldwin ("invalid cipher op %x", crp->crp_op)); 1256c0341432SJohn Baldwin break; 1257c0341432SJohn Baldwin case CSP_MODE_DIGEST: 1258c0341432SJohn Baldwin KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || 1259c0341432SJohn Baldwin crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, 1260c0341432SJohn Baldwin ("invalid digest op %x", crp->crp_op)); 1261c0341432SJohn Baldwin break; 1262c0341432SJohn Baldwin case CSP_MODE_AEAD: 1263c0341432SJohn Baldwin KASSERT(crp->crp_op == 1264c0341432SJohn Baldwin (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1265c0341432SJohn Baldwin crp->crp_op == 1266c0341432SJohn Baldwin (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1267c0341432SJohn Baldwin ("invalid AEAD op %x", crp->crp_op)); 1268c0341432SJohn Baldwin if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) 1269c0341432SJohn Baldwin KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, 1270c0341432SJohn Baldwin ("GCM without a separate IV")); 1271c0341432SJohn Baldwin if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) 1272c0341432SJohn Baldwin KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, 1273c0341432SJohn Baldwin ("CCM without a separate IV")); 1274c0341432SJohn Baldwin break; 1275c0341432SJohn Baldwin case CSP_MODE_ETA: 
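		/*
		 * ETA (encrypt-then-authenticate) pairs a cipher with a
		 * separate digest, so the permitted op combinations mirror
		 * the AEAD case above.
		 */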
1276c0341432SJohn Baldwin KASSERT(crp->crp_op == 1277c0341432SJohn Baldwin (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1278c0341432SJohn Baldwin crp->crp_op == 1279c0341432SJohn Baldwin (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1280c0341432SJohn Baldwin ("invalid ETA op %x", crp->crp_op)); 1281c0341432SJohn Baldwin break; 1282c0341432SJohn Baldwin } 1283c0341432SJohn Baldwin KASSERT(crp->crp_buf_type >= CRYPTO_BUF_CONTIG && 1284c0341432SJohn Baldwin crp->crp_buf_type <= CRYPTO_BUF_MBUF, 1285c0341432SJohn Baldwin ("invalid crp buffer type %d", crp->crp_buf_type)); 1286c0341432SJohn Baldwin if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 1287c0341432SJohn Baldwin KASSERT(crp->crp_aad_start == 0 || 1288c0341432SJohn Baldwin crp->crp_aad_start < crp->crp_ilen, 1289c0341432SJohn Baldwin ("invalid AAD start")); 1290c0341432SJohn Baldwin KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, 1291c0341432SJohn Baldwin ("AAD with zero length and non-zero start")); 1292c0341432SJohn Baldwin KASSERT(crp->crp_aad_length == 0 || 1293c0341432SJohn Baldwin crp->crp_aad_start + crp->crp_aad_length <= crp->crp_ilen, 1294c0341432SJohn Baldwin ("AAD outside input length")); 1295c0341432SJohn Baldwin } else { 1296c0341432SJohn Baldwin KASSERT(crp->crp_aad_start == 0 && crp->crp_aad_length == 0, 1297c0341432SJohn Baldwin ("AAD region in request not supporting AAD")); 1298c0341432SJohn Baldwin } 1299c0341432SJohn Baldwin if (csp->csp_ivlen == 0) { 1300*29fe41ddSJohn Baldwin KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, 1301*29fe41ddSJohn Baldwin ("IV_SEPARATE set when IV isn't used")); 1302c0341432SJohn Baldwin KASSERT(crp->crp_iv_start == 0, 1303c0341432SJohn Baldwin ("crp_iv_start set when IV isn't used")); 1304c0341432SJohn Baldwin } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { 1305c0341432SJohn Baldwin KASSERT(crp->crp_iv_start == 0, 1306c0341432SJohn Baldwin ("IV_SEPARATE used with non-zero IV start")); 1307c0341432SJohn Baldwin } else { 1308c0341432SJohn Baldwin KASSERT(crp->crp_iv_start < crp->crp_ilen, 1309c0341432SJohn Baldwin ("invalid IV start")); 1310c0341432SJohn Baldwin KASSERT(crp->crp_iv_start + csp->csp_ivlen <= crp->crp_ilen, 1311c0341432SJohn Baldwin ("IV outside input length")); 1312c0341432SJohn Baldwin } 1313c0341432SJohn Baldwin KASSERT(crp->crp_payload_start == 0 || 1314c0341432SJohn Baldwin crp->crp_payload_start < crp->crp_ilen, 1315c0341432SJohn Baldwin ("invalid payload start")); 1316c0341432SJohn Baldwin KASSERT(crp->crp_payload_start + crp->crp_payload_length <= 1317c0341432SJohn Baldwin crp->crp_ilen, ("payload outside input length")); 1318c0341432SJohn Baldwin if (csp->csp_mode == CSP_MODE_DIGEST || 1319c0341432SJohn Baldwin csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 1320c0341432SJohn Baldwin KASSERT(crp->crp_digest_start == 0 || 1321c0341432SJohn Baldwin crp->crp_digest_start < crp->crp_ilen, 1322c0341432SJohn Baldwin ("invalid digest start")); 1323c0341432SJohn Baldwin /* XXX: For the mlen == 0 case this check isn't perfect. 
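		 * When csp_auth_mlen is zero the transform's full digest
		 * length is used instead, and that length is not known
		 * here, so the bound below may be looser than the region
		 * the driver will actually touch.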
*/ 1324c0341432SJohn Baldwin KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= 1325c0341432SJohn Baldwin crp->crp_ilen, 1326c0341432SJohn Baldwin ("digest outside input length")); 1327c0341432SJohn Baldwin } else { 1328c0341432SJohn Baldwin KASSERT(crp->crp_digest_start == 0, 1329c0341432SJohn Baldwin ("non-zero digest start for request without a digest")); 1330c0341432SJohn Baldwin } 1331c0341432SJohn Baldwin if (csp->csp_cipher_klen != 0) 1332c0341432SJohn Baldwin KASSERT(csp->csp_cipher_key != NULL || 1333c0341432SJohn Baldwin crp->crp_cipher_key != NULL, 1334c0341432SJohn Baldwin ("cipher request without a key")); 1335c0341432SJohn Baldwin if (csp->csp_auth_klen != 0) 1336c0341432SJohn Baldwin KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, 1337c0341432SJohn Baldwin ("auth request without a key")); 1338c0341432SJohn Baldwin KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); 1339c0341432SJohn Baldwin } 1340c0341432SJohn Baldwin #endif 1341c0341432SJohn Baldwin 1342091d81d1SSam Leffler /* 1343091d81d1SSam Leffler * Add a crypto request to a queue, to be processed by the kernel thread. 1344091d81d1SSam Leffler */ 1345091d81d1SSam Leffler int 1346091d81d1SSam Leffler crypto_dispatch(struct cryptop *crp) 1347091d81d1SSam Leffler { 13484acae0acSPawel Jakub Dawidek struct cryptocap *cap; 13494acae0acSPawel Jakub Dawidek int result; 1350091d81d1SSam Leffler 1351c0341432SJohn Baldwin #ifdef INVARIANTS 1352c0341432SJohn Baldwin crp_sanity(crp); 1353c0341432SJohn Baldwin #endif 1354c0341432SJohn Baldwin 13557d1853eeSSam Leffler cryptostats.cs_ops++; 13567d1853eeSSam Leffler 13577d1853eeSSam Leffler #ifdef CRYPTO_TIMING 13587d1853eeSSam Leffler if (crypto_timing) 13597d1853eeSSam Leffler binuptime(&crp->crp_tstamp); 13607d1853eeSSam Leffler #endif 13617d1853eeSSam Leffler 13621b0909d5SConrad Meyer crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num; 1363de2b2c90SFabien Thomas 136439bbca6fSFabien Thomas if (CRYPTOP_ASYNC(crp)) { 136539bbca6fSFabien Thomas if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { 136639bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 136739bbca6fSFabien Thomas 136839bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(crp->crp_retw_id); 136939bbca6fSFabien Thomas 137039bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 137139bbca6fSFabien Thomas crp->crp_seq = ret_worker->reorder_ops++; 137239bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 137339bbca6fSFabien Thomas } 137439bbca6fSFabien Thomas 137539bbca6fSFabien Thomas TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); 137639bbca6fSFabien Thomas taskqueue_enqueue(crypto_tq, &crp->crp_task); 137739bbca6fSFabien Thomas return (0); 137839bbca6fSFabien Thomas } 13794acae0acSPawel Jakub Dawidek 1380eb73a605SSam Leffler if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { 1381eb73a605SSam Leffler /* 1382eb73a605SSam Leffler * Caller marked the request to be processed 1383eb73a605SSam Leffler * immediately; dispatch it directly to the 1384eb73a605SSam Leffler * driver unless the driver is currently blocked. 
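		 * (Callers that set CRYPTO_F_BATCH skip this fast path and
		 * accept some queueing latency so the dispatch thread can
		 * hand the driver several requests in a row, signalled via
		 * CRYPTO_HINT_MORE.)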
1385eb73a605SSam Leffler */ 1386c0341432SJohn Baldwin cap = crp->crp_session->cap; 13874acae0acSPawel Jakub Dawidek if (!cap->cc_qblocked) { 13884acae0acSPawel Jakub Dawidek result = crypto_invoke(cap, crp, 0); 13894acae0acSPawel Jakub Dawidek if (result != ERESTART) 13904acae0acSPawel Jakub Dawidek return (result); 1391091d81d1SSam Leffler /* 1392bda0abc6SPawel Jakub Dawidek * The driver ran out of resources, put the request on 1393bda0abc6SPawel Jakub Dawidek * the queue. 1394091d81d1SSam Leffler */ 1395f7890744SSam Leffler } 1396eb73a605SSam Leffler } 139739bbca6fSFabien Thomas crypto_batch_enqueue(crp); 139839bbca6fSFabien Thomas return 0; 139939bbca6fSFabien Thomas } 140039bbca6fSFabien Thomas 140139bbca6fSFabien Thomas void 140239bbca6fSFabien Thomas crypto_batch_enqueue(struct cryptop *crp) 140339bbca6fSFabien Thomas { 140439bbca6fSFabien Thomas 14054acae0acSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 14064acae0acSPawel Jakub Dawidek TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); 14073a865c82SPawel Jakub Dawidek if (crp_sleep) 14083a865c82SPawel Jakub Dawidek wakeup_one(&crp_q); 14093569ae7fSSam Leffler CRYPTO_Q_UNLOCK(); 1410091d81d1SSam Leffler } 1411091d81d1SSam Leffler 1412091d81d1SSam Leffler /* 1413091d81d1SSam Leffler * Add an asymmetric crypto request to a queue, 1414091d81d1SSam Leffler * to be processed by the kernel thread. 1415091d81d1SSam Leffler */ 1416091d81d1SSam Leffler int 1417091d81d1SSam Leffler crypto_kdispatch(struct cryptkop *krp) 1418091d81d1SSam Leffler { 14196810ad6fSSam Leffler int error; 1420091d81d1SSam Leffler 14217d1853eeSSam Leffler cryptostats.cs_kops++; 14227d1853eeSSam Leffler 1423c0341432SJohn Baldwin krp->krp_cap = NULL; 1424c0341432SJohn Baldwin error = crypto_kinvoke(krp); 14256810ad6fSSam Leffler if (error == ERESTART) { 1426091d81d1SSam Leffler CRYPTO_Q_LOCK(); 14274acae0acSPawel Jakub Dawidek TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); 14283a865c82SPawel Jakub Dawidek if (crp_sleep) 14293a865c82SPawel Jakub Dawidek wakeup_one(&crp_q); 1430091d81d1SSam Leffler CRYPTO_Q_UNLOCK(); 14316810ad6fSSam Leffler error = 0; 14326810ad6fSSam Leffler } 14336810ad6fSSam Leffler return error; 1434091d81d1SSam Leffler } 1435091d81d1SSam Leffler 1436091d81d1SSam Leffler /* 14376810ad6fSSam Leffler * Verify a driver is suitable for the specified operation. 14386810ad6fSSam Leffler */ 14396810ad6fSSam Leffler static __inline int 14406810ad6fSSam Leffler kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) 14416810ad6fSSam Leffler { 14426810ad6fSSam Leffler return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; 14436810ad6fSSam Leffler } 14446810ad6fSSam Leffler 14456810ad6fSSam Leffler /* 14466810ad6fSSam Leffler * Select a driver for an asym operation. The driver must 14476810ad6fSSam Leffler * support the necessary algorithm. The caller can constrain 14486810ad6fSSam Leffler * which device is selected with the flags parameter. The 14496810ad6fSSam Leffler * algorithm we use here is pretty stupid; just use the first 14506810ad6fSSam Leffler * driver that supports the algorithms we need. If there are 14516810ad6fSSam Leffler * multiple suitable drivers we choose the driver with the 14526810ad6fSSam Leffler * fewest active operations. We prefer hardware-backed 14536810ad6fSSam Leffler * drivers to software ones when either may be used.
14546810ad6fSSam Leffler */ 14556810ad6fSSam Leffler static struct cryptocap * 14566810ad6fSSam Leffler crypto_select_kdriver(const struct cryptkop *krp, int flags) 14576810ad6fSSam Leffler { 1458151ba793SAlexander Kabaev struct cryptocap *cap, *best; 14596810ad6fSSam Leffler int match, hid; 14606810ad6fSSam Leffler 14616810ad6fSSam Leffler CRYPTO_DRIVER_ASSERT(); 14626810ad6fSSam Leffler 14636810ad6fSSam Leffler /* 14646810ad6fSSam Leffler * Look first for hardware crypto devices if permitted. 14656810ad6fSSam Leffler */ 14666810ad6fSSam Leffler if (flags & CRYPTOCAP_F_HARDWARE) 14676810ad6fSSam Leffler match = CRYPTOCAP_F_HARDWARE; 14686810ad6fSSam Leffler else 14696810ad6fSSam Leffler match = CRYPTOCAP_F_SOFTWARE; 14706810ad6fSSam Leffler best = NULL; 14716810ad6fSSam Leffler again: 1472c0341432SJohn Baldwin for (hid = 0; hid < crypto_drivers_size; hid++) { 14736810ad6fSSam Leffler /* 1474c0341432SJohn Baldwin * If there is no driver for this slot, or the driver 1475c0341432SJohn Baldwin * is not appropriate (hardware or software based on 1476c0341432SJohn Baldwin * match), then skip. 14776810ad6fSSam Leffler */ 1478c0341432SJohn Baldwin cap = crypto_drivers[hid]; 14796810ad6fSSam Leffler if (cap == NULL || 14806810ad6fSSam Leffler (cap->cc_flags & match) == 0) 14816810ad6fSSam Leffler continue; 14826810ad6fSSam Leffler 14836810ad6fSSam Leffler /* verify all the algorithms are supported. */ 14846810ad6fSSam Leffler if (kdriver_suitable(cap, krp)) { 14856810ad6fSSam Leffler if (best == NULL || 14866810ad6fSSam Leffler cap->cc_koperations < best->cc_koperations) 14876810ad6fSSam Leffler best = cap; 14886810ad6fSSam Leffler } 14896810ad6fSSam Leffler } 14906810ad6fSSam Leffler if (best != NULL) 14916810ad6fSSam Leffler return best; 14926810ad6fSSam Leffler if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { 14936810ad6fSSam Leffler /* sort of an Algol 68-style for loop */ 14946810ad6fSSam Leffler match = CRYPTOCAP_F_SOFTWARE; 14956810ad6fSSam Leffler goto again; 14966810ad6fSSam Leffler } 14976810ad6fSSam Leffler return best; 14986810ad6fSSam Leffler } 14996810ad6fSSam Leffler 15006810ad6fSSam Leffler /* 1501c0341432SJohn Baldwin * Choose a driver for an asymmetric crypto request. 1502091d81d1SSam Leffler */ 1503c0341432SJohn Baldwin static struct cryptocap * 1504c0341432SJohn Baldwin crypto_lookup_kdriver(struct cryptkop *krp) 1505091d81d1SSam Leffler { 1506c0341432SJohn Baldwin struct cryptocap *cap; 1507c0341432SJohn Baldwin uint32_t crid; 1508091d81d1SSam Leffler 1509c0341432SJohn Baldwin /* If this request is requeued, it might already have a driver. */ 1510c0341432SJohn Baldwin cap = krp->krp_cap; 1511c0341432SJohn Baldwin if (cap != NULL) 1512c0341432SJohn Baldwin return (cap); 1513091d81d1SSam Leffler 1514c0341432SJohn Baldwin /* Use krp_crid to choose a driver. */ 1515c0341432SJohn Baldwin crid = krp->krp_crid; 15166810ad6fSSam Leffler if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 15176810ad6fSSam Leffler cap = crypto_checkdriver(crid); 15186810ad6fSSam Leffler if (cap != NULL) { 15196810ad6fSSam Leffler /* 1520c0341432SJohn Baldwin * Driver present, it must support the 1521c0341432SJohn Baldwin * necessary algorithm and, if s/w drivers are 1522c0341432SJohn Baldwin * excluded, it must be registered as 1523c0341432SJohn Baldwin * hardware-backed.
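			 * (A krp_crid with neither capability flag set, as
			 * in this branch, is interpreted as a specific
			 * driver id, e.g. one previously returned by
			 * crypto_find_driver(); otherwise it is treated as
			 * a capability mask below.)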
15246810ad6fSSam Leffler */ 15256810ad6fSSam Leffler if (!kdriver_suitable(cap, krp) || 15266810ad6fSSam Leffler (!crypto_devallowsoft && 15276810ad6fSSam Leffler (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) 15286810ad6fSSam Leffler cap = NULL; 15294acae0acSPawel Jakub Dawidek } 15306810ad6fSSam Leffler } else { 15316810ad6fSSam Leffler /* 15326810ad6fSSam Leffler * No requested driver; select based on crid flags. 15336810ad6fSSam Leffler */ 15346810ad6fSSam Leffler if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ 15356810ad6fSSam Leffler crid &= ~CRYPTOCAP_F_SOFTWARE; 15366810ad6fSSam Leffler cap = crypto_select_kdriver(krp, crid); 15374acae0acSPawel Jakub Dawidek } 1538c0341432SJohn Baldwin 1539c0341432SJohn Baldwin if (cap != NULL) { 1540c0341432SJohn Baldwin krp->krp_cap = cap_ref(cap); 1541c0341432SJohn Baldwin krp->krp_hid = cap->cc_hid; 1542c0341432SJohn Baldwin } 1543c0341432SJohn Baldwin return (cap); 1544c0341432SJohn Baldwin } 1545c0341432SJohn Baldwin 1546c0341432SJohn Baldwin /* 1547c0341432SJohn Baldwin * Dispatch an asymmetric crypto request. 1548c0341432SJohn Baldwin */ 1549c0341432SJohn Baldwin static int 1550c0341432SJohn Baldwin crypto_kinvoke(struct cryptkop *krp) 1551c0341432SJohn Baldwin { 1552c0341432SJohn Baldwin struct cryptocap *cap = NULL; 1553c0341432SJohn Baldwin int error; 1554c0341432SJohn Baldwin 1555c0341432SJohn Baldwin KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); 1556c0341432SJohn Baldwin KASSERT(krp->krp_callback != NULL, 1557c0341432SJohn Baldwin ("%s: krp->crp_callback == NULL", __func__)); 1558c0341432SJohn Baldwin 1559c0341432SJohn Baldwin CRYPTO_DRIVER_LOCK(); 1560c0341432SJohn Baldwin cap = crypto_lookup_kdriver(krp); 1561c0341432SJohn Baldwin if (cap == NULL) { 1562c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 1563c0341432SJohn Baldwin krp->krp_status = ENODEV; 1564c0341432SJohn Baldwin crypto_kdone(krp); 1565c0341432SJohn Baldwin return (0); 1566c0341432SJohn Baldwin } 1567c0341432SJohn Baldwin 1568c0341432SJohn Baldwin /* 1569c0341432SJohn Baldwin * If the device is blocked, return ERESTART to requeue it. 1570c0341432SJohn Baldwin */ 1571c0341432SJohn Baldwin if (cap->cc_kqblocked) { 1572c0341432SJohn Baldwin /* 1573c0341432SJohn Baldwin * XXX: Previously this set krp_status to ERESTART and 1574c0341432SJohn Baldwin * invoked crypto_kdone but the caller would still 1575c0341432SJohn Baldwin * requeue it. 
1576c0341432SJohn Baldwin */ 1577c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 1578c0341432SJohn Baldwin return (ERESTART); 1579c0341432SJohn Baldwin } 1580c0341432SJohn Baldwin 15814acae0acSPawel Jakub Dawidek cap->cc_koperations++; 15824acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 15836810ad6fSSam Leffler error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); 15844acae0acSPawel Jakub Dawidek if (error == ERESTART) { 1585c0341432SJohn Baldwin CRYPTO_DRIVER_LOCK(); 15864acae0acSPawel Jakub Dawidek cap->cc_koperations--; 15874acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 15884acae0acSPawel Jakub Dawidek return (error); 15894acae0acSPawel Jakub Dawidek } 1590091d81d1SSam Leffler 1591c0341432SJohn Baldwin KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); 1592c0341432SJohn Baldwin return (0); 1593091d81d1SSam Leffler } 1594091d81d1SSam Leffler 15957d1853eeSSam Leffler #ifdef CRYPTO_TIMING 15967d1853eeSSam Leffler static void 15977d1853eeSSam Leffler crypto_tstat(struct cryptotstat *ts, struct bintime *bt) 15987d1853eeSSam Leffler { 15997d1853eeSSam Leffler struct bintime now, delta; 16007d1853eeSSam Leffler struct timespec t; 16017d1853eeSSam Leffler uint64_t u; 16027d1853eeSSam Leffler 16037d1853eeSSam Leffler binuptime(&now); 16047d1853eeSSam Leffler u = now.frac; 16057d1853eeSSam Leffler delta.frac = now.frac - bt->frac; 16067d1853eeSSam Leffler delta.sec = now.sec - bt->sec; 16077d1853eeSSam Leffler if (u < delta.frac) 16087d1853eeSSam Leffler delta.sec--; 16097d1853eeSSam Leffler bintime2timespec(&delta, &t); 16106040822cSAlan Somers timespecadd(&ts->acc, &t, &ts->acc); 16117d1853eeSSam Leffler if (timespeccmp(&t, &ts->min, <)) 16127d1853eeSSam Leffler ts->min = t; 16137d1853eeSSam Leffler if (timespeccmp(&t, &ts->max, >)) 16147d1853eeSSam Leffler ts->max = t; 16157d1853eeSSam Leffler ts->count++; 16167d1853eeSSam Leffler 16177d1853eeSSam Leffler *bt = now; 16187d1853eeSSam Leffler } 16197d1853eeSSam Leffler #endif 16207d1853eeSSam Leffler 162139bbca6fSFabien Thomas static void 162239bbca6fSFabien Thomas crypto_task_invoke(void *ctx, int pending) 162339bbca6fSFabien Thomas { 162439bbca6fSFabien Thomas struct cryptocap *cap; 162539bbca6fSFabien Thomas struct cryptop *crp; 1626c0341432SJohn Baldwin int result; 162739bbca6fSFabien Thomas 162839bbca6fSFabien Thomas crp = (struct cryptop *)ctx; 1629c0341432SJohn Baldwin cap = crp->crp_session->cap; 163039bbca6fSFabien Thomas result = crypto_invoke(cap, crp, 0); 163139bbca6fSFabien Thomas if (result == ERESTART) 163239bbca6fSFabien Thomas crypto_batch_enqueue(crp); 163339bbca6fSFabien Thomas } 163439bbca6fSFabien Thomas 1635091d81d1SSam Leffler /* 1636091d81d1SSam Leffler * Dispatch a crypto request to the appropriate crypto devices. 
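 * Returns 0 when the request has been accepted; completion, including any
 * error recorded in crp_etype, is then reported through the request's
 * callback.  Returns ERESTART when the driver is out of resources and the
 * caller should requeue the request.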
1637091d81d1SSam Leffler */ 1638091d81d1SSam Leffler static int 16394acae0acSPawel Jakub Dawidek crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) 1640091d81d1SSam Leffler { 16414acae0acSPawel Jakub Dawidek 16424acae0acSPawel Jakub Dawidek KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); 16434acae0acSPawel Jakub Dawidek KASSERT(crp->crp_callback != NULL, 16444acae0acSPawel Jakub Dawidek ("%s: crp->crp_callback == NULL", __func__)); 1645c0341432SJohn Baldwin KASSERT(crp->crp_session != NULL, 1646c0341432SJohn Baldwin ("%s: crp->crp_session == NULL", __func__)); 1647091d81d1SSam Leffler 16487d1853eeSSam Leffler #ifdef CRYPTO_TIMING 16497d1853eeSSam Leffler if (crypto_timing) 16507d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp); 16517d1853eeSSam Leffler #endif 16524acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1653c0341432SJohn Baldwin struct crypto_session_params csp; 16541b0909d5SConrad Meyer crypto_session_t nses; 1655091d81d1SSam Leffler 1656091d81d1SSam Leffler /* 1657091d81d1SSam Leffler * Driver has unregistered; migrate the session and return 1658091d81d1SSam Leffler * an error to the caller so they'll resubmit the op. 16594acae0acSPawel Jakub Dawidek * 16604acae0acSPawel Jakub Dawidek * XXX: What if there are more already queued requests for this 16614acae0acSPawel Jakub Dawidek * session? 1662c0341432SJohn Baldwin * 1663c0341432SJohn Baldwin * XXX: Real solution is to make sessions refcounted 1664c0341432SJohn Baldwin * and force callers to hold a reference when 1665c0341432SJohn Baldwin * assigning to crp_session. Could maybe change 1666c0341432SJohn Baldwin * crypto_getreq to accept a session pointer to make 1667c0341432SJohn Baldwin * that work. Alternatively, we could abandon the 1668c0341432SJohn Baldwin * notion of rewriting crp_session in requests forcing 1669c0341432SJohn Baldwin * the caller to deal with allocating a new session. 1670c0341432SJohn Baldwin * Perhaps provide a method to allow a crp's session to 1671c0341432SJohn Baldwin * be swapped that callers could use. 1672091d81d1SSam Leffler */ 1673c0341432SJohn Baldwin csp = crp->crp_session->csp; 16741b0909d5SConrad Meyer crypto_freesession(crp->crp_session); 16754acae0acSPawel Jakub Dawidek 1676c0341432SJohn Baldwin /* 1677c0341432SJohn Baldwin * XXX: Key pointers may no longer be valid. If we 1678c0341432SJohn Baldwin * really want to support this we need to define the 1679c0341432SJohn Baldwin * KPI such that 'csp' is required to be valid for the 1680c0341432SJohn Baldwin * duration of a session by the caller perhaps. 1681c0341432SJohn Baldwin * 1682c0341432SJohn Baldwin * XXX: If the keys have been changed this will reuse 1683c0341432SJohn Baldwin * the old keys. This probably suggests making 1684c0341432SJohn Baldwin * rekeying more explicit and updating the key 1685c0341432SJohn Baldwin * pointers in 'csp' when the keys change. 1686c0341432SJohn Baldwin */ 1687c0341432SJohn Baldwin if (crypto_newsession(&nses, &csp, 16886810ad6fSSam Leffler CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) 16891b0909d5SConrad Meyer crp->crp_session = nses; 1690091d81d1SSam Leffler 1691091d81d1SSam Leffler crp->crp_etype = EAGAIN; 16921a91ccccSSam Leffler crypto_done(crp); 1693091d81d1SSam Leffler return 0; 1694091d81d1SSam Leffler } else { 1695091d81d1SSam Leffler /* 1696091d81d1SSam Leffler * Invoke the driver to process the request. 
1697091d81d1SSam Leffler */ 16986810ad6fSSam Leffler return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); 1699091d81d1SSam Leffler } 1700091d81d1SSam Leffler } 1701091d81d1SSam Leffler 1702091d81d1SSam Leffler void 1703091d81d1SSam Leffler crypto_freereq(struct cryptop *crp) 1704091d81d1SSam Leffler { 1705091d81d1SSam Leffler 1706091d81d1SSam Leffler if (crp == NULL) 1707091d81d1SSam Leffler return; 1708091d81d1SSam Leffler 17090d5c337bSPawel Jakub Dawidek #ifdef DIAGNOSTIC 17100d5c337bSPawel Jakub Dawidek { 17110d5c337bSPawel Jakub Dawidek struct cryptop *crp2; 171239bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 17130d5c337bSPawel Jakub Dawidek 17140d5c337bSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 17150d5c337bSPawel Jakub Dawidek TAILQ_FOREACH(crp2, &crp_q, crp_next) { 17160d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 17170d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the crypto queue (%p).", 17180d5c337bSPawel Jakub Dawidek crp)); 17190d5c337bSPawel Jakub Dawidek } 17200d5c337bSPawel Jakub Dawidek CRYPTO_Q_UNLOCK(); 172139bbca6fSFabien Thomas 172239bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 172339bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 172439bbca6fSFabien Thomas TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { 17250d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 17260d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the return queue (%p).", 17270d5c337bSPawel Jakub Dawidek crp)); 17280d5c337bSPawel Jakub Dawidek } 172939bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 173039bbca6fSFabien Thomas } 17310d5c337bSPawel Jakub Dawidek } 17320d5c337bSPawel Jakub Dawidek #endif 17330d5c337bSPawel Jakub Dawidek 1734091d81d1SSam Leffler uma_zfree(cryptop_zone, crp); 1735091d81d1SSam Leffler } 1736091d81d1SSam Leffler 1737091d81d1SSam Leffler struct cryptop * 1738c0341432SJohn Baldwin crypto_getreq(crypto_session_t cses, int how) 1739091d81d1SSam Leffler { 1740091d81d1SSam Leffler struct cryptop *crp; 1741091d81d1SSam Leffler 1742c0341432SJohn Baldwin MPASS(how == M_WAITOK || how == M_NOWAIT); 1743c0341432SJohn Baldwin crp = uma_zalloc(cryptop_zone, how | M_ZERO); 1744c0341432SJohn Baldwin crp->crp_session = cses; 1745c0341432SJohn Baldwin return (crp); 1746091d81d1SSam Leffler } 1747091d81d1SSam Leffler 1748091d81d1SSam Leffler /* 1749091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 1750091d81d1SSam Leffler */ 1751091d81d1SSam Leffler void 1752091d81d1SSam Leffler crypto_done(struct cryptop *crp) 1753091d81d1SSam Leffler { 17543569ae7fSSam Leffler KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, 17553569ae7fSSam Leffler ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); 17563569ae7fSSam Leffler crp->crp_flags |= CRYPTO_F_DONE; 17577d1853eeSSam Leffler if (crp->crp_etype != 0) 17587d1853eeSSam Leffler cryptostats.cs_errs++; 17597d1853eeSSam Leffler #ifdef CRYPTO_TIMING 17607d1853eeSSam Leffler if (crypto_timing) 17617d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp); 17627d1853eeSSam Leffler #endif 1763d8409aafSSam Leffler /* 1764d8409aafSSam Leffler * CBIMM means unconditionally do the callback immediately; 1765d8409aafSSam Leffler * CBIFSYNC means do the callback immediately only if the 1766d8409aafSSam Leffler * operation was done synchronously. Both are used to avoid 1767d8409aafSSam Leffler * doing extraneous context switches; the latter is mostly 1768d8409aafSSam Leffler * used with the software crypto driver. 
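	 *
	 * A cheap callback of that kind is, schematically, nothing more
	 * than the following (an illustrative sketch only; the name is
	 * hypothetical):
	 *
	 *	static int
	 *	example_done(struct cryptop *crp)
	 *	{
	 *		wakeup(crp);
	 *		return (0);
	 *	}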
1769d8409aafSSam Leffler */ 177039bbca6fSFabien Thomas if (!CRYPTOP_ASYNC_KEEPORDER(crp) && 177139bbca6fSFabien Thomas ((crp->crp_flags & CRYPTO_F_CBIMM) || 1772d8409aafSSam Leffler ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && 17731b0909d5SConrad Meyer (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { 1774eb73a605SSam Leffler /* 1775eb73a605SSam Leffler * Do the callback directly. This is ok when the 1776eb73a605SSam Leffler * callback routine does very little (e.g. the 1777eb73a605SSam Leffler * /dev/crypto callback method just does a wakeup). 1778eb73a605SSam Leffler */ 1779eb73a605SSam Leffler #ifdef CRYPTO_TIMING 1780eb73a605SSam Leffler if (crypto_timing) { 1781eb73a605SSam Leffler /* 1782eb73a605SSam Leffler * NB: We must copy the timestamp before 1783eb73a605SSam Leffler * doing the callback as the cryptop is 1784eb73a605SSam Leffler * likely to be reclaimed. 1785eb73a605SSam Leffler */ 1786eb73a605SSam Leffler struct bintime t = crp->crp_tstamp; 1787eb73a605SSam Leffler crypto_tstat(&cryptostats.cs_cb, &t); 1788eb73a605SSam Leffler crp->crp_callback(crp); 1789eb73a605SSam Leffler crypto_tstat(&cryptostats.cs_finis, &t); 1790eb73a605SSam Leffler } else 1791eb73a605SSam Leffler #endif 1792eb73a605SSam Leffler crp->crp_callback(crp); 1793eb73a605SSam Leffler } else { 179439bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 179539bbca6fSFabien Thomas bool wake; 179639bbca6fSFabien Thomas 179739bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(crp->crp_retw_id); 179839bbca6fSFabien Thomas wake = false; 179939bbca6fSFabien Thomas 1800eb73a605SSam Leffler /* 1801eb73a605SSam Leffler * Normal case; queue the callback for the thread. 1802eb73a605SSam Leffler */ 180339bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 180439bbca6fSFabien Thomas if (CRYPTOP_ASYNC_KEEPORDER(crp)) { 180539bbca6fSFabien Thomas struct cryptop *tmp; 180639bbca6fSFabien Thomas 180739bbca6fSFabien Thomas TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, 180839bbca6fSFabien Thomas cryptop_q, crp_next) { 180939bbca6fSFabien Thomas if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { 181039bbca6fSFabien Thomas TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, 181139bbca6fSFabien Thomas tmp, crp, crp_next); 181239bbca6fSFabien Thomas break; 181339bbca6fSFabien Thomas } 181439bbca6fSFabien Thomas } 181539bbca6fSFabien Thomas if (tmp == NULL) { 181639bbca6fSFabien Thomas TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, 181739bbca6fSFabien Thomas crp, crp_next); 181839bbca6fSFabien Thomas } 181939bbca6fSFabien Thomas 182039bbca6fSFabien Thomas if (crp->crp_seq == ret_worker->reorder_cur_seq) 182139bbca6fSFabien Thomas wake = true; 182239bbca6fSFabien Thomas } 182339bbca6fSFabien Thomas else { 182439bbca6fSFabien Thomas if (CRYPTO_RETW_EMPTY(ret_worker)) 182539bbca6fSFabien Thomas wake = true; 182639bbca6fSFabien Thomas 182739bbca6fSFabien Thomas TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); 182839bbca6fSFabien Thomas } 182939bbca6fSFabien Thomas 183039bbca6fSFabien Thomas if (wake) 183139bbca6fSFabien Thomas wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 183239bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 1833091d81d1SSam Leffler } 1834eb73a605SSam Leffler } 1835091d81d1SSam Leffler 1836091d81d1SSam Leffler /* 1837091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 
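 * This is the asymmetric counterpart of crypto_done(); completed key
 * requests are always handed to the first return worker rather than
 * being distributed across the return workers the way symmetric
 * requests are.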
1838091d81d1SSam Leffler */ 1839091d81d1SSam Leffler void 1840091d81d1SSam Leffler crypto_kdone(struct cryptkop *krp) 1841091d81d1SSam Leffler { 184239bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 18434acae0acSPawel Jakub Dawidek struct cryptocap *cap; 1844091d81d1SSam Leffler 18457d1853eeSSam Leffler if (krp->krp_status != 0) 18467d1853eeSSam Leffler cryptostats.cs_kerrs++; 18474acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_LOCK(); 1848c0341432SJohn Baldwin cap = krp->krp_cap; 1849fb17b4c5SJohn-Mark Gurney KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); 18504acae0acSPawel Jakub Dawidek cap->cc_koperations--; 1851c0341432SJohn Baldwin if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) 1852c0341432SJohn Baldwin wakeup(cap); 18534acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 1854c0341432SJohn Baldwin krp->krp_cap = NULL; 1855c0341432SJohn Baldwin cap_rele(cap); 185639bbca6fSFabien Thomas 185739bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(0); 185839bbca6fSFabien Thomas 185939bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 186039bbca6fSFabien Thomas if (CRYPTO_RETW_EMPTY(ret_worker)) 186139bbca6fSFabien Thomas wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 186239bbca6fSFabien Thomas TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); 186339bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 1864091d81d1SSam Leffler } 1865091d81d1SSam Leffler 1866091d81d1SSam Leffler int 1867091d81d1SSam Leffler crypto_getfeat(int *featp) 1868091d81d1SSam Leffler { 1869091d81d1SSam Leffler int hid, kalg, feat = 0; 1870091d81d1SSam Leffler 1871091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 1872c0341432SJohn Baldwin for (hid = 0; hid < crypto_drivers_size; hid++) { 1873c0341432SJohn Baldwin const struct cryptocap *cap = crypto_drivers[hid]; 18746810ad6fSSam Leffler 1875c0341432SJohn Baldwin if (cap == NULL || 1876c0341432SJohn Baldwin ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 1877c0341432SJohn Baldwin !crypto_devallowsoft)) { 1878091d81d1SSam Leffler continue; 1879091d81d1SSam Leffler } 1880091d81d1SSam Leffler for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 18816810ad6fSSam Leffler if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) 1882091d81d1SSam Leffler feat |= 1 << kalg; 1883091d81d1SSam Leffler } 1884091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1885091d81d1SSam Leffler *featp = feat; 1886091d81d1SSam Leffler return (0); 1887091d81d1SSam Leffler } 1888091d81d1SSam Leffler 188951e45326SSam Leffler /* 189051e45326SSam Leffler * Terminate a thread at module unload. The process that 189151e45326SSam Leffler * initiated this is waiting for us to signal that we're gone; 189251e45326SSam Leffler * wake it up and exit. We use the driver table lock to insure 189351e45326SSam Leffler * we don't do the wakeup before they're waiting. There is no 189451e45326SSam Leffler * race here because the waiter sleeps on the proc lock for the 189551e45326SSam Leffler * thread so it gets notified at the right time because of an 189651e45326SSam Leffler * extra wakeup that's done in exit1(). 189751e45326SSam Leffler */ 1898091d81d1SSam Leffler static void 189951e45326SSam Leffler crypto_finis(void *chan) 1900091d81d1SSam Leffler { 190151e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 190251e45326SSam Leffler wakeup_one(chan); 190351e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); 19043745c395SJulian Elischer kproc_exit(0); 1905091d81d1SSam Leffler } 1906091d81d1SSam Leffler 1907091d81d1SSam Leffler /* 19081a91ccccSSam Leffler * Crypto thread, dispatches crypto requests. 
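 * Each pass below picks the first symmetric request whose driver is
 * unblocked (or whose driver is being torn down and needs migration),
 * passing CRYPTO_HINT_MORE to the driver when another queued request for
 * the same driver is seen, then does the same for the asymmetric queue;
 * when both queues are idle the thread sleeps on crp_q.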
1909091d81d1SSam Leffler */ 1910091d81d1SSam Leffler static void 1911091d81d1SSam Leffler crypto_proc(void) 1912091d81d1SSam Leffler { 19131a91ccccSSam Leffler struct cryptop *crp, *submit; 19141a91ccccSSam Leffler struct cryptkop *krp; 1915091d81d1SSam Leffler struct cryptocap *cap; 1916091d81d1SSam Leffler int result, hint; 1917091d81d1SSam Leffler 19186ed982a2SAndrew Turner #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 191904c49e68SKonstantin Belousov fpu_kern_thread(FPU_KERN_NORMAL); 192004c49e68SKonstantin Belousov #endif 192104c49e68SKonstantin Belousov 19221a91ccccSSam Leffler CRYPTO_Q_LOCK(); 1923091d81d1SSam Leffler for (;;) { 1924091d81d1SSam Leffler /* 1925091d81d1SSam Leffler * Find the first element in the queue that can be 1926091d81d1SSam Leffler * processed and look-ahead to see if multiple ops 1927091d81d1SSam Leffler * are ready for the same driver. 1928091d81d1SSam Leffler */ 1929091d81d1SSam Leffler submit = NULL; 1930091d81d1SSam Leffler hint = 0; 1931091d81d1SSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 1932c0341432SJohn Baldwin cap = crp->crp_session->cap; 19334acae0acSPawel Jakub Dawidek /* 19344acae0acSPawel Jakub Dawidek * Driver cannot disappear while there is an active 19354acae0acSPawel Jakub Dawidek * session. 19364acae0acSPawel Jakub Dawidek */ 1937c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1938c3c82036SPawel Jakub Dawidek __func__, __LINE__)); 1939c0341432SJohn Baldwin if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1940091d81d1SSam Leffler /* Op needs to be migrated, process it. */ 1941091d81d1SSam Leffler if (submit == NULL) 1942091d81d1SSam Leffler submit = crp; 1943091d81d1SSam Leffler break; 1944091d81d1SSam Leffler } 1945091d81d1SSam Leffler if (!cap->cc_qblocked) { 1946091d81d1SSam Leffler if (submit != NULL) { 1947091d81d1SSam Leffler /* 1948091d81d1SSam Leffler * We stop on finding another op, 1949091d81d1SSam Leffler * regardless of whether it's for the same 1950091d81d1SSam Leffler * driver or not. We could keep 1951091d81d1SSam Leffler * searching the queue but it might be 1952091d81d1SSam Leffler * better to just use a per-driver 1953091d81d1SSam Leffler * queue instead. 1954091d81d1SSam Leffler */ 1955c0341432SJohn Baldwin if (submit->crp_session->cap == cap) 1956091d81d1SSam Leffler hint = CRYPTO_HINT_MORE; 1957091d81d1SSam Leffler break; 1958091d81d1SSam Leffler } else { 1959091d81d1SSam Leffler submit = crp; 1960eb73a605SSam Leffler if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) 1961091d81d1SSam Leffler break; 1962091d81d1SSam Leffler /* keep scanning for more that are q'd */ 1963091d81d1SSam Leffler } 1964091d81d1SSam Leffler } 1965091d81d1SSam Leffler } 1966091d81d1SSam Leffler if (submit != NULL) { 1967091d81d1SSam Leffler TAILQ_REMOVE(&crp_q, submit, crp_next); 1968c0341432SJohn Baldwin cap = submit->crp_session->cap; 1969c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1970c3c82036SPawel Jakub Dawidek __func__, __LINE__)); 1971c0341432SJohn Baldwin CRYPTO_Q_UNLOCK(); 19724acae0acSPawel Jakub Dawidek result = crypto_invoke(cap, submit, hint); 1973c0341432SJohn Baldwin CRYPTO_Q_LOCK(); 1974091d81d1SSam Leffler if (result == ERESTART) { 1975091d81d1SSam Leffler /* 1976091d81d1SSam Leffler * The driver ran out of resources, mark the 1977091d81d1SSam Leffler * driver ``blocked'' for cryptop's and put 1978091d81d1SSam Leffler * the request back in the queue.
It would 1979091d81d1SSam Leffler * be best to put the request back where we got 1980091d81d1SSam Leffler * it but that's hard so for now we put it 1981091d81d1SSam Leffler * at the front. This should be ok; putting 1982091d81d1SSam Leffler * it at the end does not work. 1983091d81d1SSam Leffler */ 1984c0341432SJohn Baldwin cap->cc_qblocked = 1; 1985091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); 19867d1853eeSSam Leffler cryptostats.cs_blocks++; 1987091d81d1SSam Leffler } 1988091d81d1SSam Leffler } 1989091d81d1SSam Leffler 1990091d81d1SSam Leffler /* As above, but for key ops */ 1991091d81d1SSam Leffler TAILQ_FOREACH(krp, &crp_kq, krp_next) { 1992c0341432SJohn Baldwin cap = krp->krp_cap; 1993c0341432SJohn Baldwin if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 19946810ad6fSSam Leffler /* 1995c0341432SJohn Baldwin * Operation needs to be migrated, 1996c0341432SJohn Baldwin * clear krp_cap so a new driver is 1997c0341432SJohn Baldwin * selected. 19986810ad6fSSam Leffler */ 1999c0341432SJohn Baldwin krp->krp_cap = NULL; 2000c0341432SJohn Baldwin cap_rele(cap); 2001091d81d1SSam Leffler break; 2002091d81d1SSam Leffler } 2003091d81d1SSam Leffler if (!cap->cc_kqblocked) 2004091d81d1SSam Leffler break; 2005091d81d1SSam Leffler } 2006091d81d1SSam Leffler if (krp != NULL) { 2007091d81d1SSam Leffler TAILQ_REMOVE(&crp_kq, krp, krp_next); 2008c0341432SJohn Baldwin CRYPTO_Q_UNLOCK(); 2009c0341432SJohn Baldwin result = crypto_kinvoke(krp); 2010c0341432SJohn Baldwin CRYPTO_Q_LOCK(); 2011091d81d1SSam Leffler if (result == ERESTART) { 2012091d81d1SSam Leffler /* 2013091d81d1SSam Leffler * The driver ran out of resources, mark the 2014091d81d1SSam Leffler * driver ``blocked'' for cryptkop's and put 2015091d81d1SSam Leffler * the request back in the queue. It would 2016091d81d1SSam Leffler * be best to put the request back where we got 2017091d81d1SSam Leffler * it but that's hard so for now we put it 2018091d81d1SSam Leffler * at the front. This should be ok; putting 2019091d81d1SSam Leffler * it at the end does not work. 2020091d81d1SSam Leffler */ 2021c0341432SJohn Baldwin krp->krp_cap->cc_kqblocked = 1; 2022091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); 20237d1853eeSSam Leffler cryptostats.cs_kblocks++; 2024091d81d1SSam Leffler } 2025091d81d1SSam Leffler } 2026091d81d1SSam Leffler 20271a91ccccSSam Leffler if (submit == NULL && krp == NULL) { 2028091d81d1SSam Leffler /* 2029091d81d1SSam Leffler * Nothing more to be processed. Sleep until we're 2030091d81d1SSam Leffler * woken because there are more ops to process. 2031091d81d1SSam Leffler * This happens either by submission or by a driver 2032091d81d1SSam Leffler * becoming unblocked and notifying us through 2033091d81d1SSam Leffler * crypto_unblock. Note that when we wake up we 2034091d81d1SSam Leffler * start processing each queue again from the 2035091d81d1SSam Leffler * front. It's not clear that it's important to 2036091d81d1SSam Leffler * preserve this ordering since ops may finish 2037091d81d1SSam Leffler * out of order if dispatched to different devices 2038091d81d1SSam Leffler * and some become blocked while others do not.
2039091d81d1SSam Leffler */ 20403a865c82SPawel Jakub Dawidek crp_sleep = 1; 20411a91ccccSSam Leffler msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); 20423a865c82SPawel Jakub Dawidek crp_sleep = 0; 204351e45326SSam Leffler if (cryptoproc == NULL) 204451e45326SSam Leffler break; 20457d1853eeSSam Leffler cryptostats.cs_intrs++; 2046091d81d1SSam Leffler } 2047091d81d1SSam Leffler } 204851e45326SSam Leffler CRYPTO_Q_UNLOCK(); 20491a91ccccSSam Leffler 205051e45326SSam Leffler crypto_finis(&crp_q); 20511a91ccccSSam Leffler } 20521a91ccccSSam Leffler 20531a91ccccSSam Leffler /* 20541a91ccccSSam Leffler * Crypto returns thread, does callbacks for processed crypto requests. 20551a91ccccSSam Leffler * Callbacks are done here, rather than in the crypto drivers, because 20561a91ccccSSam Leffler * callbacks typically are expensive and would slow interrupt handling. 20571a91ccccSSam Leffler */ 20581a91ccccSSam Leffler static void 205939bbca6fSFabien Thomas crypto_ret_proc(struct crypto_ret_worker *ret_worker) 20601a91ccccSSam Leffler { 20611a91ccccSSam Leffler struct cryptop *crpt; 20621a91ccccSSam Leffler struct cryptkop *krpt; 20631a91ccccSSam Leffler 206439bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 20651a91ccccSSam Leffler for (;;) { 20661a91ccccSSam Leffler /* Harvest return q's for completed ops */ 206739bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); 206839bbca6fSFabien Thomas if (crpt != NULL) { 206939bbca6fSFabien Thomas if (crpt->crp_seq == ret_worker->reorder_cur_seq) { 207039bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); 207139bbca6fSFabien Thomas ret_worker->reorder_cur_seq++; 207239bbca6fSFabien Thomas } else { 207339bbca6fSFabien Thomas crpt = NULL; 207439bbca6fSFabien Thomas } 207539bbca6fSFabien Thomas } 20761a91ccccSSam Leffler 207739bbca6fSFabien Thomas if (crpt == NULL) { 207839bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); 207939bbca6fSFabien Thomas if (crpt != NULL) 208039bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); 208139bbca6fSFabien Thomas } 208239bbca6fSFabien Thomas 208339bbca6fSFabien Thomas krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); 20841a91ccccSSam Leffler if (krpt != NULL) 208539bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); 20861a91ccccSSam Leffler 20871a91ccccSSam Leffler if (crpt != NULL || krpt != NULL) { 208839bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 20891a91ccccSSam Leffler /* 20901a91ccccSSam Leffler * Run callbacks unlocked. 20911a91ccccSSam Leffler */ 20927d1853eeSSam Leffler if (crpt != NULL) { 20937d1853eeSSam Leffler #ifdef CRYPTO_TIMING 20947d1853eeSSam Leffler if (crypto_timing) { 20957d1853eeSSam Leffler /* 20967d1853eeSSam Leffler * NB: We must copy the timestamp before 20977d1853eeSSam Leffler * doing the callback as the cryptop is 20987d1853eeSSam Leffler * likely to be reclaimed. 
20997d1853eeSSam Leffler */ 21007d1853eeSSam Leffler struct bintime t = crpt->crp_tstamp; 21017d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_cb, &t); 21021a91ccccSSam Leffler crpt->crp_callback(crpt); 21037d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_finis, &t); 21047d1853eeSSam Leffler } else 21057d1853eeSSam Leffler #endif 21067d1853eeSSam Leffler crpt->crp_callback(crpt); 21077d1853eeSSam Leffler } 21081a91ccccSSam Leffler if (krpt != NULL) 21091a91ccccSSam Leffler krpt->krp_callback(krpt); 211039bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 21111a91ccccSSam Leffler } else { 21121a91ccccSSam Leffler /* 21131a91ccccSSam Leffler * Nothing more to be processed. Sleep until we're 21141a91ccccSSam Leffler * woken because there are more returns to process. 21151a91ccccSSam Leffler */ 211639bbca6fSFabien Thomas msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, 21171a91ccccSSam Leffler "crypto_ret_wait", 0); 211839bbca6fSFabien Thomas if (ret_worker->cryptoretproc == NULL) 211951e45326SSam Leffler break; 21207d1853eeSSam Leffler cryptostats.cs_rets++; 21211a91ccccSSam Leffler } 21221a91ccccSSam Leffler } 212339bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 212451e45326SSam Leffler 212539bbca6fSFabien Thomas crypto_finis(&ret_worker->crp_ret_q); 21261a91ccccSSam Leffler } 21276810ad6fSSam Leffler 21286810ad6fSSam Leffler #ifdef DDB 21296810ad6fSSam Leffler static void 21306810ad6fSSam Leffler db_show_drivers(void) 21316810ad6fSSam Leffler { 21326810ad6fSSam Leffler int hid; 21336810ad6fSSam Leffler 21346810ad6fSSam Leffler db_printf("%12s %4s %4s %8s %2s %2s\n" 21356810ad6fSSam Leffler , "Device" 21366810ad6fSSam Leffler , "Ses" 21376810ad6fSSam Leffler , "Kops" 21386810ad6fSSam Leffler , "Flags" 21396810ad6fSSam Leffler , "QB" 21406810ad6fSSam Leffler , "KB" 21416810ad6fSSam Leffler ); 2142c0341432SJohn Baldwin for (hid = 0; hid < crypto_drivers_size; hid++) { 2143c0341432SJohn Baldwin const struct cryptocap *cap = crypto_drivers[hid]; 2144c0341432SJohn Baldwin if (cap == NULL) 21456810ad6fSSam Leffler continue; 21466810ad6fSSam Leffler db_printf("%-12s %4u %4u %08x %2u %2u\n" 21476810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 21486810ad6fSSam Leffler , cap->cc_sessions 21496810ad6fSSam Leffler , cap->cc_koperations 21506810ad6fSSam Leffler , cap->cc_flags 21516810ad6fSSam Leffler , cap->cc_qblocked 21526810ad6fSSam Leffler , cap->cc_kqblocked 21536810ad6fSSam Leffler ); 21546810ad6fSSam Leffler } 21556810ad6fSSam Leffler } 21566810ad6fSSam Leffler 21576810ad6fSSam Leffler DB_SHOW_COMMAND(crypto, db_show_crypto) 21586810ad6fSSam Leffler { 21596810ad6fSSam Leffler struct cryptop *crp; 216039bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 21616810ad6fSSam Leffler 21626810ad6fSSam Leffler db_show_drivers(); 21636810ad6fSSam Leffler db_printf("\n"); 21646810ad6fSSam Leffler 21656810ad6fSSam Leffler db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", 21666810ad6fSSam Leffler "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", 2167c0341432SJohn Baldwin "Device", "Callback"); 21686810ad6fSSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 21696810ad6fSSam Leffler db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" 2170c0341432SJohn Baldwin , crp->crp_session->cap->cc_hid 21711b0909d5SConrad Meyer , (int) crypto_ses2caps(crp->crp_session) 21726810ad6fSSam Leffler , crp->crp_ilen, crp->crp_olen 21736810ad6fSSam Leffler , crp->crp_etype 21746810ad6fSSam Leffler , crp->crp_flags 2175c0341432SJohn Baldwin , device_get_nameunit(crp->crp_session->cap->cc_dev) 
21766810ad6fSSam Leffler , crp->crp_callback 21776810ad6fSSam Leffler ); 21786810ad6fSSam Leffler } 217939bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 218039bbca6fSFabien Thomas db_printf("\n%8s %4s %4s %4s %8s\n", 218139bbca6fSFabien Thomas "ret_worker", "HID", "Etype", "Flags", "Callback"); 218239bbca6fSFabien Thomas if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 218339bbca6fSFabien Thomas TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { 218439bbca6fSFabien Thomas db_printf("%8td %4u %4u %04x %8p\n" 218539bbca6fSFabien Thomas , CRYPTO_RETW_ID(ret_worker) 2186c0341432SJohn Baldwin , crp->crp_session->cap->cc_hid 21876810ad6fSSam Leffler , crp->crp_etype 21886810ad6fSSam Leffler , crp->crp_flags 21896810ad6fSSam Leffler , crp->crp_callback 21906810ad6fSSam Leffler ); 21916810ad6fSSam Leffler } 21926810ad6fSSam Leffler } 21936810ad6fSSam Leffler } 219439bbca6fSFabien Thomas } 21956810ad6fSSam Leffler 21966810ad6fSSam Leffler DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) 21976810ad6fSSam Leffler { 21986810ad6fSSam Leffler struct cryptkop *krp; 219939bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 22006810ad6fSSam Leffler 22016810ad6fSSam Leffler db_show_drivers(); 22026810ad6fSSam Leffler db_printf("\n"); 22036810ad6fSSam Leffler 22046810ad6fSSam Leffler db_printf("%4s %5s %4s %4s %8s %4s %8s\n", 22056810ad6fSSam Leffler "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); 22066810ad6fSSam Leffler TAILQ_FOREACH(krp, &crp_kq, krp_next) { 22076810ad6fSSam Leffler db_printf("%4u %5u %4u %4u %08x %4u %8p\n" 22086810ad6fSSam Leffler , krp->krp_op 22096810ad6fSSam Leffler , krp->krp_status 22106810ad6fSSam Leffler , krp->krp_iparams, krp->krp_oparams 22116810ad6fSSam Leffler , krp->krp_crid, krp->krp_hid 22126810ad6fSSam Leffler , krp->krp_callback 22136810ad6fSSam Leffler ); 22146810ad6fSSam Leffler } 221539bbca6fSFabien Thomas 221639bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(0); 221739bbca6fSFabien Thomas if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 22186810ad6fSSam Leffler db_printf("%4s %5s %8s %4s %8s\n", 22196810ad6fSSam Leffler "Op", "Status", "CRID", "HID", "Callback"); 222039bbca6fSFabien Thomas TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { 22216810ad6fSSam Leffler db_printf("%4u %5u %08x %4u %8p\n" 22226810ad6fSSam Leffler , krp->krp_op 22236810ad6fSSam Leffler , krp->krp_status 22246810ad6fSSam Leffler , krp->krp_crid, krp->krp_hid 22256810ad6fSSam Leffler , krp->krp_callback 22266810ad6fSSam Leffler ); 22276810ad6fSSam Leffler } 22286810ad6fSSam Leffler } 22296810ad6fSSam Leffler } 22306810ad6fSSam Leffler #endif 22316810ad6fSSam Leffler 22326810ad6fSSam Leffler int crypto_modevent(module_t mod, int type, void *unused); 22336810ad6fSSam Leffler 22346810ad6fSSam Leffler /* 22356810ad6fSSam Leffler * Initialization code, both for static and dynamic loading. 22366810ad6fSSam Leffler * Note this is not invoked with the usual MODULE_DECLARE 22376810ad6fSSam Leffler * mechanism but instead is listed as a dependency by the 22386810ad6fSSam Leffler * cryptosoft driver. This guarantees proper ordering of 22396810ad6fSSam Leffler * calls on module load/unload. 
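 * Drivers that use the framework declare the dependency themselves,
 * e.g. MODULE_DEPEND(drivername, crypto, 1, 1, 1), so that this module
 * is initialized before they are.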
22406810ad6fSSam Leffler */ 22416810ad6fSSam Leffler int 22426810ad6fSSam Leffler crypto_modevent(module_t mod, int type, void *unused) 22436810ad6fSSam Leffler { 22446810ad6fSSam Leffler int error = EINVAL; 22456810ad6fSSam Leffler 22466810ad6fSSam Leffler switch (type) { 22476810ad6fSSam Leffler case MOD_LOAD: 22486810ad6fSSam Leffler error = crypto_init(); 22496810ad6fSSam Leffler if (error == 0 && bootverbose) 22506810ad6fSSam Leffler printf("crypto: <crypto core>\n"); 22516810ad6fSSam Leffler break; 22526810ad6fSSam Leffler case MOD_UNLOAD: 22536810ad6fSSam Leffler /*XXX disallow if active sessions */ 22546810ad6fSSam Leffler error = 0; 22556810ad6fSSam Leffler crypto_destroy(); 22566810ad6fSSam Leffler return 0; 22576810ad6fSSam Leffler } 22586810ad6fSSam Leffler return error; 22596810ad6fSSam Leffler } 22606810ad6fSSam Leffler MODULE_VERSION(crypto, 1); 22616810ad6fSSam Leffler MODULE_DEPEND(crypto, zlib, 1, 1, 1); 2262
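/*
 * A purely illustrative, non-compiled sketch of the consumer-side life
 * cycle implemented by the routines above (crypto_newsession ->
 * crypto_getreq -> crypto_dispatch -> callback -> crypto_freereq /
 * crypto_freesession).  Every identifier beginning with "example_" is
 * hypothetical; the request fields and constants used are those
 * referenced elsewhere in this file, except crp_opaque and
 * CRYPTO_SHA2_256, which are assumed from the wider cryptodev API, and
 * the wait protocol shown is only one reasonable choice.
 */
#if 0
static struct mtx example_mtx;
MTX_SYSINIT(example_mtx, &example_mtx, "example crypto wait", MTX_DEF);

/* Completion callback: mark the request done and wake the submitter. */
static int
example_done(struct cryptop *crp)
{

	mtx_lock(&example_mtx);
	crp->crp_opaque = crp;		/* any non-NULL value means "done" */
	wakeup(crp);
	mtx_unlock(&example_mtx);
	return (0);
}

/* Synchronously compute a SHA2-256 digest of a contiguous buffer. */
static int
example_sha256(const void *data, size_t len, uint8_t digest[32])
{
	struct crypto_session_params csp;
	crypto_session_t cses;
	struct cryptop *crp;
	char *buf;
	int error;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_DIGEST;
	csp.csp_auth_alg = CRYPTO_SHA2_256;	/* csp_auth_mlen == 0: full digest */
	error = crypto_newsession(&cses, &csp,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (error != 0)
		return (error);

	/* Payload followed by room for the 32 byte digest. */
	buf = malloc(len + 32, M_TEMP, M_WAITOK);
	memcpy(buf, data, len);

	crp = crypto_getreq(cses, M_WAITOK);
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIFSYNC;
	crp->crp_buf_type = CRYPTO_BUF_CONTIG;
	crp->crp_buf = buf;
	crp->crp_ilen = len + 32;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;
	crp->crp_digest_start = len;
	crp->crp_callback = example_done;

	error = crypto_dispatch(crp);
	if (error == 0) {
		mtx_lock(&example_mtx);
		while (crp->crp_opaque == NULL)
			msleep(crp, &example_mtx, 0, "exdig", 0);
		mtx_unlock(&example_mtx);
		error = crp->crp_etype;	/* EAGAIN would mean "resubmit" */
	}
	if (error == 0)
		memcpy(digest, buf + len, 32);

	free(buf, M_TEMP);
	crypto_freereq(crp);
	crypto_freesession(cses);
	return (error);
}
#endif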