/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the Openbsd Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.  IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#define	CRYPTO_TIMING				/* enable timing support */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <ddb/ddb.h>

#include <vm/uma.h>
#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>			/* XXX for M_XDATA */

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

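/*
 * Opaque session handle returned by crypto_newsession() and passed back
 * by consumers with each request and to crypto_freesession().  hid
 * indexes crypto_drivers[], softc points at cc_session_size bytes of
 * per-session driver state, and capabilities caches the driver's
 * CRYPTOCAP flags.  Accessors: crypto_ses2hid(), crypto_ses2caps() and
 * crypto_get_driver_session() below.
 */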
struct crypto_session {
	device_t parent;
	void *softc;
	uint32_t hid;
	uint32_t capabilities;
};

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 *	(d) - protected by CRYPTO_DRIVER_LOCK()
 *	(q) - protected by CRYPTO_Q_LOCK()
 *	Not tagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;			/* (d) device/driver */
	u_int32_t	cc_sessions;		/* (d) # of sessions */
	u_int32_t	cc_koperations;		/* (d) # of asym operations */
	/*
	 * Largest possible operator length (in bits) for each type of
	 * encryption algorithm. XXX not used
	 */
	u_int16_t	cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
	u_int8_t	cc_alg[CRYPTO_ALGORITHM_MAX + 1];
	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];

	int		cc_flags;		/* (d) flags */
#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	int		cc_kqblocked;		/* (q) asymmetric q blocked */
	size_t		cc_session_size;
};
static	struct cryptocap *crypto_drivers = NULL;
static	int crypto_drivers_num = 0;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static	int crp_sleep = 0;
static	TAILQ_HEAD(cryptop_q ,cryptop) crp_q;	/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic
 */
#define	CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)

struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptkop) crp_ret_kq;	/* callback queue for asym jobs */

	u_int32_t reorder_ops;		/* total ordered sym jobs received */
	u_int32_t reorder_cur_seq;	/* current sym job dispatched */

	struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_EMPTY(w) \
	(TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))

static int crypto_workers_num = 0;
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
	   &crypto_workers_num, 0,
	   "Number of crypto workers used to dispatch crypto jobs");

static	uma_zone_t cryptop_zone;
static	uma_zone_t cryptodesc_zone;
static	uma_zone_t cryptoses_zone;

int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
int	crypto_devallowsoft = 0;	/* only use hardware crypto */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software crypto by /dev/crypto");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static	void crypto_proc(void);
static	struct proc *cryptoproc;
static	void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int flags);
static	void crypto_remove(struct cryptocap *cap);
static	void crypto_task_invoke(void *ctx, int pending);
static	void crypto_batch_enqueue(struct cryptop *crp);

static	struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif

/* Try to avoid directly exposing the key buffer as a symbol */
static	struct keybuf *keybuf;

static	struct keybuf empty_keybuf = {
	.kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	caddr_t kmdp;

	kmdp = preload_search_by_type("elf kernel");

	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	keybuf = (struct keybuf *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_KEYBUF);

	if (keybuf == NULL)
		keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf * get_keybuf(void) {

	return (keybuf);
}

static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
		MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptoses_zone = uma_zcreate("crypto_session",
	    sizeof(struct crypto_session), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	if (cryptodesc_zone == NULL || cryptop_zone == NULL ||
	    cryptoses_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		error = ENOMEM;
		goto bad;
	}

	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot setup crypto drivers\n");
		error = ENOMEM;
		goto bad;
	}

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO,
				taskqueue_thread_enqueue, &crypto_tq);
	if (crypto_tq == NULL) {
		printf("crypto init: cannot setup crypto taskqueue\n");
		error = ENOMEM;
		goto bad;
	}

	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
		"crypto");

	error = kproc_create((void (*)(void *)) crypto_proc, NULL,
		    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
			error);
		goto bad;
	}

	crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker),
			M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
	if (crypto_ret_workers == NULL) {
		error = ENOMEM;
		printf("crypto_init: cannot allocate ret workers\n");
		goto bad;
	}

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_kq);

		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;

		mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF);

		error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker,
				&ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d",
				error);
			goto bad;
		}
	}

	keybuf_init();

	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: insure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}

static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptoses_zone != NULL)
		uma_zdestroy(cryptoses_zone);
	if (cryptodesc_zone != NULL)
		uma_zdestroy(cryptodesc_zone);
	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->capabilities);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session->softc);
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Compare a driver's list of supported algorithms against another
 * list; return non-zero if all algorithms are supported.
 */
static int
driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
{
	const struct cryptoini *cr;

	/* See if all the algorithms are supported. */
	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0)
			return 0;
	return 1;
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need. If there
 * are multiple drivers we choose the driver with the fewest active
 * sessions.  We prefer hardware-backed drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver(const struct cryptoini *cri, int flags)
{
	struct cryptocap *cap, *best;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized, is in the process of
		 * going away, or is not appropriate (hardware
		 * or software based on match), then skip.
		 */
		if (cap->cc_dev == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify all the algorithms are supported. */
		if (driver_suitable(cap, cri)) {
			if (best == NULL ||
			    cap->cc_sessions < best->cc_sessions)
				best = cap;
		}
	}
	if (best == NULL && match == CRYPTOCAP_F_HARDWARE &&
	    (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return best;
}

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses, struct cryptoini *cri, int crid)
{
	crypto_session_t res;
	void *softc_mem;
	struct cryptocap *cap;
	u_int32_t hid;
	size_t softc_size;
	int err;

restart:
	res = NULL;
	softc_mem = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && !driver_suitable(cap, cri))
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(cri, crid);
		/*
		 * if NULL then can't do everything in one session.
		 * XXX Fix this. We need to inject a "virtual" session
		 * XXX layer right about here.
		 */
	}
	if (cap == NULL) {
		CRYPTDEB("no driver");
		err = EOPNOTSUPP;
		goto out;
	}
	cap->cc_sessions++;
	softc_size = cap->cc_session_size;
	hid = cap - crypto_drivers;
	cap = NULL;
	CRYPTO_DRIVER_UNLOCK();

	softc_mem = malloc(softc_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO);
	res->softc = softc_mem;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0) {
		cap->cc_sessions--;
		crypto_remove(cap);
		cap = NULL;
	}
	if (cap == NULL) {
		free(softc_mem, M_CRYPTO_DATA);
		uma_zfree(cryptoses_zone, res);
		CRYPTO_DRIVER_UNLOCK();
		goto restart;
	}

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, cri);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		goto out;
	}

	res->capabilities = cap->cc_flags & 0xff000000;
	res->hid = hid;
	*cses = res;

out:
	CRYPTO_DRIVER_UNLOCK();
	if (err != 0) {
		free(softc_mem, M_CRYPTO_DATA);
		if (res != NULL)
			uma_zfree(cryptoses_zone, res);
	}
	return err;
}
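/*
 * Illustrative consumer usage (a sketch, not a complete example): the
 * caller builds a cryptoini chain describing the algorithms, creates a
 * session, then submits requests against it:
 *
 *	crypto_session_t cses;
 *	struct cryptoini cri;		// cri_alg etc. filled in by caller
 *	error = crypto_newsession(&cses, &cri,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *	...
 *	crp = crypto_getreq(1);		// request + one descriptor (see below)
 *	crp->crp_session = cses;
 *	crp->crp_callback = my_done;	// caller-supplied completion callback
 *	error = crypto_dispatch(crp);
 *	...
 *	crypto_freesession(cses);	// once all requests have completed
 *
 * The request itself is released with crypto_freereq() (see below),
 * typically from the completion callback.
 */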

static void
crypto_remove(struct cryptocap *cap)
{

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
		bzero(cap, sizeof(*cap));
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;
	void *ses;
	size_t ses_size;
	u_int32_t hid;

	if (cses == NULL)
		return;

	CRYPTO_DRIVER_LOCK();

	hid = crypto_ses2hid(cses);
	KASSERT(hid < crypto_drivers_num,
	    ("bogus crypto_session %p hid %u", cses, hid));
	cap = &crypto_drivers[hid];

	ses = cses->softc;
	ses_size = cap->cc_session_size;

	if (cap->cc_sessions)
		cap->cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	explicit_bzero(ses, ses_size);
	free(ses, M_CRYPTO_DATA);
	uma_zfree(cryptoses_zone, cses);

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		crypto_remove(cap);

	CRYPTO_DRIVER_UNLOCK();
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		printf("%s: no flags specified when registering driver\n",
		    device_get_nameunit(dev));
		return -1;
	}

	CRYPTO_DRIVER_LOCK();

	for (i = 0; i < crypto_drivers_num; i++) {
		if (crypto_drivers[i].cc_dev == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
			break;
		}
	}

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_dev = dev;
	crypto_drivers[i].cc_flags = flags;
	crypto_drivers[i].cc_session_size = sessionsize;
	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	CRYPTO_DRIVER_UNLOCK();

	return i;
}
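/*
 * Illustrative driver-side registration (a sketch; the algorithm
 * constants come from <opencrypto/cryptodev.h> and the session
 * structure name is the driver's own):
 *
 *	int32_t cid;
 *
 *	cid = crypto_get_driverid(dev, sizeof(struct mydrv_session),
 *	    CRYPTOCAP_F_HARDWARE);
 *	if (cid < 0)
 *		return (ENXIO);
 *	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
 *	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
 *
 * The driver then implements the cryptodev_if methods
 * (CRYPTODEV_NEWSESSION, CRYPTODEV_PROCESS, ...) invoked from this file.
 */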
/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_num; i++) {
		device_t dev = crypto_drivers[i].cc_dev;
		if (dev == NULL ||
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;
		if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
		    strncmp(match, device_get_name(dev), len) == 0)
			break;
	}
	CRYPTO_DRIVER_UNLOCK();
	return i < crypto_drivers_num ? i : -1;
}

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap = crypto_checkdriver(hid);
	return cap != NULL ? cap->cc_dev : NULL;
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap = crypto_checkdriver(hid);
	return cap != NULL ? cap->cc_flags : 0;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: %s registers key alg %u flags %u\n"
				, device_get_nameunit(cap->cc_dev)
				, kalg
				, flags
			);
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
				, device_get_nameunit(cap->cc_dev)
				, alg
				, flags
				, maxoplen
			);
		cap->cc_sessions = 0;		/* Unmark */
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

static void
driver_finis(struct cryptocap *cap)
{
	u_int32_t ses, kops;

	CRYPTO_DRIVER_ASSERT();

	ses = cap->cc_sessions;
	kops = cap->cc_koperations;
	bzero(cap, sizeof(*cap));
	if (ses != 0 || kops != 0) {
		/*
		 * If there are pending sessions,
		 * just mark as invalid.
		 */
		cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
		cap->cc_sessions = ses;
		cap->cc_koperations = kops;
	}
}

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	struct cryptocap *cap;
	int i, err;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1)
			driver_finis(cap);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		driver_finis(cap);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();

	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (what & CRYPTO_ASYMQ)
			cap->cc_kqblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}
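/*
 * Typical use (illustrative): a driver whose process method returned
 * ERESTART calls this once it can accept work again, e.g.
 *
 *	crypto_unblock(cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *
 * where cid is the id handed out by crypto_get_driverid().  This clears
 * cc_qblocked/cc_kqblocked and wakes the dispatch thread so queued
 * requests are retried.
 */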
/*
 * Add a crypto request to a queue, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	struct cryptocap *cap;
	u_int32_t hid;
	int result;

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		binuptime(&crp->crp_tstamp);
#endif

	crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num;

	if (CRYPTOP_ASYNC(crp)) {
		if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) {
			struct crypto_ret_worker *ret_worker;

			ret_worker = CRYPTO_RETW(crp->crp_retw_id);

			CRYPTO_RETW_LOCK(ret_worker);
			crp->crp_seq = ret_worker->reorder_ops++;
			CRYPTO_RETW_UNLOCK(ret_worker);
		}

		TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
		taskqueue_enqueue(crypto_tq, &crp->crp_task);
		return (0);
	}

	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		hid = crypto_ses2hid(crp->crp_session);

		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		/* Driver cannot disappear while there is an active session. */
		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
		if (!cap->cc_qblocked) {
			result = crypto_invoke(cap, crp, 0);
			if (result != ERESTART)
				return (result);
			/*
			 * The driver ran out of resources, put the request on
			 * the queue.
			 */
		}
	}
	crypto_batch_enqueue(crp);
	return 0;
}

void
crypto_batch_enqueue(struct cryptop *crp)
{

	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
}
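/*
 * Summary of the dispatch paths above: CRYPTO_F_ASYNC requests are handed
 * to the crypto_tq taskqueue (with a per-request sequence number when
 * CRYPTO_F_ASYNC_KEEPORDER is set, so completions can be returned in
 * submission order by the return worker); CRYPTO_F_BATCH requests and
 * requests that hit a blocked driver are queued on crp_q for the
 * crypto_proc thread; everything else is invoked directly in the caller's
 * context via crypto_invoke().
 */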
/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int error;

	cryptostats.cs_kops++;

	error = crypto_kinvoke(krp, krp->krp_crid);
	if (error == ERESTART) {
		CRYPTO_Q_LOCK();
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		if (crp_sleep)
			wakeup_one(&crp_q);
		CRYPTO_Q_UNLOCK();
		error = 0;
	}
	return error;
}

/*
 * Verify a driver is suitable for the specified operation.
 */
static __inline int
kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
{
	return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
}

/*
 * Select a driver for an asym operation.  The driver must
 * support the necessary algorithm.  The caller can constrain
 * which device is selected with the flags parameter.  The
 * algorithm we use here is pretty stupid; just use the first
 * driver that supports the algorithms we need. If there are
 * multiple suitable drivers we choose the driver with the
 * fewest active operations.  We prefer hardware-backed
 * drivers to software ones when either may be used.
 */
static struct cryptocap *
crypto_select_kdriver(const struct cryptkop *krp, int flags)
{
	struct cryptocap *cap, *best;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized, is in the process of
		 * going away, or is not appropriate (hardware
		 * or software based on match), then skip.
		 */
		if (cap->cc_dev == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify all the algorithms are supported. */
		if (kdriver_suitable(cap, krp)) {
			if (best == NULL ||
			    cap->cc_koperations < best->cc_koperations)
				best = cap;
		}
	}
	if (best != NULL)
		return best;
	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return best;
}

/*
 * Dispatch an asymmetric crypto request.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int crid)
{
	struct cryptocap *cap = NULL;
	int error;

	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
	KASSERT(krp->krp_callback != NULL,
	    ("%s: krp->crp_callback == NULL", __func__));

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		cap = crypto_checkdriver(crid);
		if (cap != NULL) {
			/*
			 * Driver present, it must support the necessary
			 * algorithm and, if s/w drivers are excluded,
			 * it must be registered as hardware-backed.
			 */
			if (!kdriver_suitable(cap, krp) ||
			    (!crypto_devallowsoft &&
			     (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
				cap = NULL;
		}
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
			crid &= ~CRYPTOCAP_F_SOFTWARE;
		cap = crypto_select_kdriver(krp, crid);
	}
	if (cap != NULL && !cap->cc_kqblocked) {
		krp->krp_hid = cap - crypto_drivers;
		cap->cc_koperations++;
		CRYPTO_DRIVER_UNLOCK();
		error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
		CRYPTO_DRIVER_LOCK();
		if (error == ERESTART) {
			cap->cc_koperations--;
			CRYPTO_DRIVER_UNLOCK();
			return (error);
		}
	} else {
		/*
		 * NB: cap is !NULL if device is blocked; in
		 * that case return ERESTART so the operation
		 * is resubmitted if possible.
		 */
		error = (cap == NULL) ? ENODEV : ERESTART;
ENODEV : ERESTART; 11664acae0acSPawel Jakub Dawidek } 11674acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 1168091d81d1SSam Leffler 1169091d81d1SSam Leffler if (error) { 1170091d81d1SSam Leffler krp->krp_status = error; 11711a91ccccSSam Leffler crypto_kdone(krp); 1172091d81d1SSam Leffler } 1173091d81d1SSam Leffler return 0; 1174091d81d1SSam Leffler } 1175091d81d1SSam Leffler 11767d1853eeSSam Leffler #ifdef CRYPTO_TIMING 11777d1853eeSSam Leffler static void 11787d1853eeSSam Leffler crypto_tstat(struct cryptotstat *ts, struct bintime *bt) 11797d1853eeSSam Leffler { 11807d1853eeSSam Leffler struct bintime now, delta; 11817d1853eeSSam Leffler struct timespec t; 11827d1853eeSSam Leffler uint64_t u; 11837d1853eeSSam Leffler 11847d1853eeSSam Leffler binuptime(&now); 11857d1853eeSSam Leffler u = now.frac; 11867d1853eeSSam Leffler delta.frac = now.frac - bt->frac; 11877d1853eeSSam Leffler delta.sec = now.sec - bt->sec; 11887d1853eeSSam Leffler if (u < delta.frac) 11897d1853eeSSam Leffler delta.sec--; 11907d1853eeSSam Leffler bintime2timespec(&delta, &t); 11917d1853eeSSam Leffler timespecadd(&ts->acc, &t); 11927d1853eeSSam Leffler if (timespeccmp(&t, &ts->min, <)) 11937d1853eeSSam Leffler ts->min = t; 11947d1853eeSSam Leffler if (timespeccmp(&t, &ts->max, >)) 11957d1853eeSSam Leffler ts->max = t; 11967d1853eeSSam Leffler ts->count++; 11977d1853eeSSam Leffler 11987d1853eeSSam Leffler *bt = now; 11997d1853eeSSam Leffler } 12007d1853eeSSam Leffler #endif 12017d1853eeSSam Leffler 120239bbca6fSFabien Thomas static void 120339bbca6fSFabien Thomas crypto_task_invoke(void *ctx, int pending) 120439bbca6fSFabien Thomas { 120539bbca6fSFabien Thomas struct cryptocap *cap; 120639bbca6fSFabien Thomas struct cryptop *crp; 120739bbca6fSFabien Thomas int hid, result; 120839bbca6fSFabien Thomas 120939bbca6fSFabien Thomas crp = (struct cryptop *)ctx; 121039bbca6fSFabien Thomas 1211*1b0909d5SConrad Meyer hid = crypto_ses2hid(crp->crp_session); 121239bbca6fSFabien Thomas cap = crypto_checkdriver(hid); 121339bbca6fSFabien Thomas 121439bbca6fSFabien Thomas result = crypto_invoke(cap, crp, 0); 121539bbca6fSFabien Thomas if (result == ERESTART) 121639bbca6fSFabien Thomas crypto_batch_enqueue(crp); 121739bbca6fSFabien Thomas } 121839bbca6fSFabien Thomas 1219091d81d1SSam Leffler /* 1220091d81d1SSam Leffler * Dispatch a crypto request to the appropriate crypto devices. 
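 * The request is handed to the driver via CRYPTODEV_PROCESS(); an
 * ERESTART return from the driver is passed back so the caller can
 * re-queue the request.  If the driver is being torn down
 * (CRYPTOCAP_F_CLEANUP) the session is migrated to another driver
 * and the op completes with EAGAIN so the submitter resubmits it.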
1221091d81d1SSam Leffler */ 1222091d81d1SSam Leffler static int 12234acae0acSPawel Jakub Dawidek crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) 1224091d81d1SSam Leffler { 12254acae0acSPawel Jakub Dawidek 12264acae0acSPawel Jakub Dawidek KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); 12274acae0acSPawel Jakub Dawidek KASSERT(crp->crp_callback != NULL, 12284acae0acSPawel Jakub Dawidek ("%s: crp->crp_callback == NULL", __func__)); 12294acae0acSPawel Jakub Dawidek KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__)); 1230091d81d1SSam Leffler 12317d1853eeSSam Leffler #ifdef CRYPTO_TIMING 12327d1853eeSSam Leffler if (crypto_timing) 12337d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp); 12347d1853eeSSam Leffler #endif 12354acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1236091d81d1SSam Leffler struct cryptodesc *crd; 1237*1b0909d5SConrad Meyer crypto_session_t nses; 1238091d81d1SSam Leffler 1239091d81d1SSam Leffler /* 1240091d81d1SSam Leffler * Driver has unregistered; migrate the session and return 1241091d81d1SSam Leffler * an error to the caller so they'll resubmit the op. 12424acae0acSPawel Jakub Dawidek * 12434acae0acSPawel Jakub Dawidek * XXX: What if there are more already queued requests for this 12444acae0acSPawel Jakub Dawidek * session? 1245091d81d1SSam Leffler */ 1246*1b0909d5SConrad Meyer crypto_freesession(crp->crp_session); 12474acae0acSPawel Jakub Dawidek 1248091d81d1SSam Leffler for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next) 1249091d81d1SSam Leffler crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI); 1250091d81d1SSam Leffler 12516810ad6fSSam Leffler /* XXX propagate flags from initial session? */ 1252*1b0909d5SConrad Meyer if (crypto_newsession(&nses, &(crp->crp_desc->CRD_INI), 12536810ad6fSSam Leffler CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) 1254*1b0909d5SConrad Meyer crp->crp_session = nses; 1255091d81d1SSam Leffler 1256091d81d1SSam Leffler crp->crp_etype = EAGAIN; 12571a91ccccSSam Leffler crypto_done(crp); 1258091d81d1SSam Leffler return 0; 1259091d81d1SSam Leffler } else { 1260091d81d1SSam Leffler /* 1261091d81d1SSam Leffler * Invoke the driver to process the request. 1262091d81d1SSam Leffler */ 12636810ad6fSSam Leffler return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); 1264091d81d1SSam Leffler } 1265091d81d1SSam Leffler } 1266091d81d1SSam Leffler 1267091d81d1SSam Leffler /* 1268091d81d1SSam Leffler * Release a set of crypto descriptors. 
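 * A NULL crp is tolerated.  The descriptor chain and the cryptop
 * itself are returned to their UMA zones; under DIAGNOSTIC the
 * request must no longer be sitting on the crypto or return queues.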
1269091d81d1SSam Leffler */ 1270091d81d1SSam Leffler void 1271091d81d1SSam Leffler crypto_freereq(struct cryptop *crp) 1272091d81d1SSam Leffler { 1273091d81d1SSam Leffler struct cryptodesc *crd; 1274091d81d1SSam Leffler 1275091d81d1SSam Leffler if (crp == NULL) 1276091d81d1SSam Leffler return; 1277091d81d1SSam Leffler 12780d5c337bSPawel Jakub Dawidek #ifdef DIAGNOSTIC 12790d5c337bSPawel Jakub Dawidek { 12800d5c337bSPawel Jakub Dawidek struct cryptop *crp2; 128139bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 12820d5c337bSPawel Jakub Dawidek 12830d5c337bSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 12840d5c337bSPawel Jakub Dawidek TAILQ_FOREACH(crp2, &crp_q, crp_next) { 12850d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 12860d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the crypto queue (%p).", 12870d5c337bSPawel Jakub Dawidek crp)); 12880d5c337bSPawel Jakub Dawidek } 12890d5c337bSPawel Jakub Dawidek CRYPTO_Q_UNLOCK(); 129039bbca6fSFabien Thomas 129139bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 129239bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 129339bbca6fSFabien Thomas TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { 12940d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 12950d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the return queue (%p).", 12960d5c337bSPawel Jakub Dawidek crp)); 12970d5c337bSPawel Jakub Dawidek } 129839bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 129939bbca6fSFabien Thomas } 13000d5c337bSPawel Jakub Dawidek } 13010d5c337bSPawel Jakub Dawidek #endif 13020d5c337bSPawel Jakub Dawidek 1303091d81d1SSam Leffler while ((crd = crp->crp_desc) != NULL) { 1304091d81d1SSam Leffler crp->crp_desc = crd->crd_next; 1305091d81d1SSam Leffler uma_zfree(cryptodesc_zone, crd); 1306091d81d1SSam Leffler } 1307091d81d1SSam Leffler uma_zfree(cryptop_zone, crp); 1308091d81d1SSam Leffler } 1309091d81d1SSam Leffler 1310091d81d1SSam Leffler /* 1311091d81d1SSam Leffler * Acquire a set of crypto descriptors. 1312091d81d1SSam Leffler */ 1313091d81d1SSam Leffler struct cryptop * 1314091d81d1SSam Leffler crypto_getreq(int num) 1315091d81d1SSam Leffler { 1316091d81d1SSam Leffler struct cryptodesc *crd; 1317091d81d1SSam Leffler struct cryptop *crp; 1318091d81d1SSam Leffler 1319bc0c6d3cSSam Leffler crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO); 1320091d81d1SSam Leffler if (crp != NULL) { 1321091d81d1SSam Leffler while (num--) { 1322bc0c6d3cSSam Leffler crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO); 1323091d81d1SSam Leffler if (crd == NULL) { 1324091d81d1SSam Leffler crypto_freereq(crp); 1325091d81d1SSam Leffler return NULL; 1326091d81d1SSam Leffler } 1327091d81d1SSam Leffler 1328091d81d1SSam Leffler crd->crd_next = crp->crp_desc; 1329091d81d1SSam Leffler crp->crp_desc = crd; 1330091d81d1SSam Leffler } 1331091d81d1SSam Leffler } 1332091d81d1SSam Leffler return crp; 1333091d81d1SSam Leffler } 1334091d81d1SSam Leffler 1335091d81d1SSam Leffler /* 1336091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 
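 * The op is marked CRYPTO_F_DONE.  Unless it must preserve submission
 * order (CRYPTOP_ASYNC_KEEPORDER), the callback runs directly when
 * CRYPTO_F_CBIMM is set, or when CRYPTO_F_CBIFSYNC is set and the
 * driver is synchronous (CRYPTOCAP_F_SYNC); otherwise the request is
 * queued to a return worker thread.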
1337091d81d1SSam Leffler */ 1338091d81d1SSam Leffler void 1339091d81d1SSam Leffler crypto_done(struct cryptop *crp) 1340091d81d1SSam Leffler { 13413569ae7fSSam Leffler KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, 13423569ae7fSSam Leffler ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); 13433569ae7fSSam Leffler crp->crp_flags |= CRYPTO_F_DONE; 13447d1853eeSSam Leffler if (crp->crp_etype != 0) 13457d1853eeSSam Leffler cryptostats.cs_errs++; 13467d1853eeSSam Leffler #ifdef CRYPTO_TIMING 13477d1853eeSSam Leffler if (crypto_timing) 13487d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp); 13497d1853eeSSam Leffler #endif 1350d8409aafSSam Leffler /* 1351d8409aafSSam Leffler * CBIMM means unconditionally do the callback immediately; 1352d8409aafSSam Leffler * CBIFSYNC means do the callback immediately only if the 1353d8409aafSSam Leffler * operation was done synchronously. Both are used to avoid 1354d8409aafSSam Leffler * doing extraneous context switches; the latter is mostly 1355d8409aafSSam Leffler * used with the software crypto driver. 1356d8409aafSSam Leffler */ 135739bbca6fSFabien Thomas if (!CRYPTOP_ASYNC_KEEPORDER(crp) && 135839bbca6fSFabien Thomas ((crp->crp_flags & CRYPTO_F_CBIMM) || 1359d8409aafSSam Leffler ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && 1360*1b0909d5SConrad Meyer (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { 1361eb73a605SSam Leffler /* 1362eb73a605SSam Leffler * Do the callback directly. This is ok when the 1363eb73a605SSam Leffler * callback routine does very little (e.g. the 1364eb73a605SSam Leffler * /dev/crypto callback method just does a wakeup). 1365eb73a605SSam Leffler */ 1366eb73a605SSam Leffler #ifdef CRYPTO_TIMING 1367eb73a605SSam Leffler if (crypto_timing) { 1368eb73a605SSam Leffler /* 1369eb73a605SSam Leffler * NB: We must copy the timestamp before 1370eb73a605SSam Leffler * doing the callback as the cryptop is 1371eb73a605SSam Leffler * likely to be reclaimed. 1372eb73a605SSam Leffler */ 1373eb73a605SSam Leffler struct bintime t = crp->crp_tstamp; 1374eb73a605SSam Leffler crypto_tstat(&cryptostats.cs_cb, &t); 1375eb73a605SSam Leffler crp->crp_callback(crp); 1376eb73a605SSam Leffler crypto_tstat(&cryptostats.cs_finis, &t); 1377eb73a605SSam Leffler } else 1378eb73a605SSam Leffler #endif 1379eb73a605SSam Leffler crp->crp_callback(crp); 1380eb73a605SSam Leffler } else { 138139bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 138239bbca6fSFabien Thomas bool wake; 138339bbca6fSFabien Thomas 138439bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(crp->crp_retw_id); 138539bbca6fSFabien Thomas wake = false; 138639bbca6fSFabien Thomas 1387eb73a605SSam Leffler /* 1388eb73a605SSam Leffler * Normal case; queue the callback for the thread. 
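 * Ops that must preserve submission order are inserted into
 * crp_ordered_ret_q sorted by crp_seq and the worker is woken only
 * when the next expected sequence number (reorder_cur_seq) has
 * arrived; other ops go on the tail of crp_ret_q, waking the worker
 * if it was idle.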
1389eb73a605SSam Leffler */ 139039bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 139139bbca6fSFabien Thomas if (CRYPTOP_ASYNC_KEEPORDER(crp)) { 139239bbca6fSFabien Thomas struct cryptop *tmp; 139339bbca6fSFabien Thomas 139439bbca6fSFabien Thomas TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, 139539bbca6fSFabien Thomas cryptop_q, crp_next) { 139639bbca6fSFabien Thomas if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { 139739bbca6fSFabien Thomas TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, 139839bbca6fSFabien Thomas tmp, crp, crp_next); 139939bbca6fSFabien Thomas break; 140039bbca6fSFabien Thomas } 140139bbca6fSFabien Thomas } 140239bbca6fSFabien Thomas if (tmp == NULL) { 140339bbca6fSFabien Thomas TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, 140439bbca6fSFabien Thomas crp, crp_next); 140539bbca6fSFabien Thomas } 140639bbca6fSFabien Thomas 140739bbca6fSFabien Thomas if (crp->crp_seq == ret_worker->reorder_cur_seq) 140839bbca6fSFabien Thomas wake = true; 140939bbca6fSFabien Thomas } 141039bbca6fSFabien Thomas else { 141139bbca6fSFabien Thomas if (CRYPTO_RETW_EMPTY(ret_worker)) 141239bbca6fSFabien Thomas wake = true; 141339bbca6fSFabien Thomas 141439bbca6fSFabien Thomas TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); 141539bbca6fSFabien Thomas } 141639bbca6fSFabien Thomas 141739bbca6fSFabien Thomas if (wake) 141839bbca6fSFabien Thomas wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 141939bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 1420091d81d1SSam Leffler } 1421eb73a605SSam Leffler } 1422091d81d1SSam Leffler 1423091d81d1SSam Leffler /* 1424091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 1425091d81d1SSam Leffler */ 1426091d81d1SSam Leffler void 1427091d81d1SSam Leffler crypto_kdone(struct cryptkop *krp) 1428091d81d1SSam Leffler { 142939bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 14304acae0acSPawel Jakub Dawidek struct cryptocap *cap; 1431091d81d1SSam Leffler 14327d1853eeSSam Leffler if (krp->krp_status != 0) 14337d1853eeSSam Leffler cryptostats.cs_kerrs++; 14344acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_LOCK(); 14354acae0acSPawel Jakub Dawidek /* XXX: What if driver is loaded in the meantime? 
*/ 14364acae0acSPawel Jakub Dawidek if (krp->krp_hid < crypto_drivers_num) { 14374acae0acSPawel Jakub Dawidek cap = &crypto_drivers[krp->krp_hid]; 1438fb17b4c5SJohn-Mark Gurney KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); 14394acae0acSPawel Jakub Dawidek cap->cc_koperations--; 14404acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) 14414acae0acSPawel Jakub Dawidek crypto_remove(cap); 14424acae0acSPawel Jakub Dawidek } 14434acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 144439bbca6fSFabien Thomas 144539bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(0); 144639bbca6fSFabien Thomas 144739bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 144839bbca6fSFabien Thomas if (CRYPTO_RETW_EMPTY(ret_worker)) 144939bbca6fSFabien Thomas wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 145039bbca6fSFabien Thomas TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); 145139bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 1452091d81d1SSam Leffler } 1453091d81d1SSam Leffler 1454091d81d1SSam Leffler int 1455091d81d1SSam Leffler crypto_getfeat(int *featp) 1456091d81d1SSam Leffler { 1457091d81d1SSam Leffler int hid, kalg, feat = 0; 1458091d81d1SSam Leffler 1459091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 1460091d81d1SSam Leffler for (hid = 0; hid < crypto_drivers_num; hid++) { 14616810ad6fSSam Leffler const struct cryptocap *cap = &crypto_drivers[hid]; 14626810ad6fSSam Leffler 14636810ad6fSSam Leffler if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 1464091d81d1SSam Leffler !crypto_devallowsoft) { 1465091d81d1SSam Leffler continue; 1466091d81d1SSam Leffler } 1467091d81d1SSam Leffler for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 14686810ad6fSSam Leffler if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) 1469091d81d1SSam Leffler feat |= 1 << kalg; 1470091d81d1SSam Leffler } 1471091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1472091d81d1SSam Leffler *featp = feat; 1473091d81d1SSam Leffler return (0); 1474091d81d1SSam Leffler } 1475091d81d1SSam Leffler 147651e45326SSam Leffler /* 147751e45326SSam Leffler * Terminate a thread at module unload. The process that 147851e45326SSam Leffler * initiated this is waiting for us to signal that we're gone; 147951e45326SSam Leffler * wake it up and exit. We use the driver table lock to insure 148051e45326SSam Leffler * we don't do the wakeup before they're waiting. There is no 148151e45326SSam Leffler * race here because the waiter sleeps on the proc lock for the 148251e45326SSam Leffler * thread so it gets notified at the right time because of an 148351e45326SSam Leffler * extra wakeup that's done in exit1(). 148451e45326SSam Leffler */ 1485091d81d1SSam Leffler static void 148651e45326SSam Leffler crypto_finis(void *chan) 1487091d81d1SSam Leffler { 148851e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 148951e45326SSam Leffler wakeup_one(chan); 149051e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); 14913745c395SJulian Elischer kproc_exit(0); 1492091d81d1SSam Leffler } 1493091d81d1SSam Leffler 1494091d81d1SSam Leffler /* 14951a91ccccSSam Leffler * Crypto thread, dispatches crypto requests. 
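 * The thread scans crp_q (and crp_kq for asymmetric ops) for the
 * first request whose driver is not blocked, invokes the driver, and
 * on ERESTART marks that driver blocked and puts the request back at
 * the head of its queue; the driver stays blocked until
 * crypto_unblock() clears the flag.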
1496091d81d1SSam Leffler */ 1497091d81d1SSam Leffler static void 1498091d81d1SSam Leffler crypto_proc(void) 1499091d81d1SSam Leffler { 15001a91ccccSSam Leffler struct cryptop *crp, *submit; 15011a91ccccSSam Leffler struct cryptkop *krp; 1502091d81d1SSam Leffler struct cryptocap *cap; 15034acae0acSPawel Jakub Dawidek u_int32_t hid; 1504091d81d1SSam Leffler int result, hint; 1505091d81d1SSam Leffler 15066ed982a2SAndrew Turner #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 150704c49e68SKonstantin Belousov fpu_kern_thread(FPU_KERN_NORMAL); 150804c49e68SKonstantin Belousov #endif 150904c49e68SKonstantin Belousov 15101a91ccccSSam Leffler CRYPTO_Q_LOCK(); 1511091d81d1SSam Leffler for (;;) { 1512091d81d1SSam Leffler /* 1513091d81d1SSam Leffler * Find the first element in the queue that can be 1514091d81d1SSam Leffler * processed and look ahead to see if multiple ops 1515091d81d1SSam Leffler * are ready for the same driver. 1516091d81d1SSam Leffler */ 1517091d81d1SSam Leffler submit = NULL; 1518091d81d1SSam Leffler hint = 0; 1519091d81d1SSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 1520*1b0909d5SConrad Meyer hid = crypto_ses2hid(crp->crp_session); 1521091d81d1SSam Leffler cap = crypto_checkdriver(hid); 15224acae0acSPawel Jakub Dawidek /* 15234acae0acSPawel Jakub Dawidek * Driver cannot disappear while there is an active 15244acae0acSPawel Jakub Dawidek * session. 15254acae0acSPawel Jakub Dawidek */ 1526c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1527c3c82036SPawel Jakub Dawidek __func__, __LINE__)); 15286810ad6fSSam Leffler if (cap == NULL || cap->cc_dev == NULL) { 1529091d81d1SSam Leffler /* Op needs to be migrated, process it. */ 1530091d81d1SSam Leffler if (submit == NULL) 1531091d81d1SSam Leffler submit = crp; 1532091d81d1SSam Leffler break; 1533091d81d1SSam Leffler } 1534091d81d1SSam Leffler if (!cap->cc_qblocked) { 1535091d81d1SSam Leffler if (submit != NULL) { 1536091d81d1SSam Leffler /* 1537091d81d1SSam Leffler * We stop on finding another op, 1538091d81d1SSam Leffler * regardless of whether it's for the same 1539091d81d1SSam Leffler * driver or not. We could keep 1540091d81d1SSam Leffler * searching the queue but it might be 1541091d81d1SSam Leffler * better to just use a per-driver 1542091d81d1SSam Leffler * queue instead.
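 * When the second op found is for the same driver we pass
 * CRYPTO_HINT_MORE below so the driver knows more work is
 * immediately available.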
1543091d81d1SSam Leffler */ 1544*1b0909d5SConrad Meyer if (crypto_ses2hid(submit->crp_session) == hid) 1545091d81d1SSam Leffler hint = CRYPTO_HINT_MORE; 1546091d81d1SSam Leffler break; 1547091d81d1SSam Leffler } else { 1548091d81d1SSam Leffler submit = crp; 1549eb73a605SSam Leffler if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) 1550091d81d1SSam Leffler break; 1551091d81d1SSam Leffler /* keep scanning in case more are queued */ 1552091d81d1SSam Leffler } 1553091d81d1SSam Leffler } 1554091d81d1SSam Leffler } 1555091d81d1SSam Leffler if (submit != NULL) { 1556091d81d1SSam Leffler TAILQ_REMOVE(&crp_q, submit, crp_next); 1557*1b0909d5SConrad Meyer hid = crypto_ses2hid(submit->crp_session); 15584acae0acSPawel Jakub Dawidek cap = crypto_checkdriver(hid); 1559c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1560c3c82036SPawel Jakub Dawidek __func__, __LINE__)); 15614acae0acSPawel Jakub Dawidek result = crypto_invoke(cap, submit, hint); 1562091d81d1SSam Leffler if (result == ERESTART) { 1563091d81d1SSam Leffler /* 1564091d81d1SSam Leffler * The driver ran out of resources; mark the 1565091d81d1SSam Leffler * driver ``blocked'' for cryptops and put 1566091d81d1SSam Leffler * the request back in the queue. It would be 1567091d81d1SSam Leffler * best to put the request back where we got 1568091d81d1SSam Leffler * it but that's hard so for now we put it 1569091d81d1SSam Leffler * at the front. This should be ok; putting 1570091d81d1SSam Leffler * it at the end does not work. 1571091d81d1SSam Leffler */ 1572091d81d1SSam Leffler /* XXX validate sid again? */ 1573*1b0909d5SConrad Meyer crypto_drivers[crypto_ses2hid(submit->crp_session)].cc_qblocked = 1; 1574091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); 15757d1853eeSSam Leffler cryptostats.cs_blocks++; 1576091d81d1SSam Leffler } 1577091d81d1SSam Leffler } 1578091d81d1SSam Leffler 1579091d81d1SSam Leffler /* As above, but for key ops */ 1580091d81d1SSam Leffler TAILQ_FOREACH(krp, &crp_kq, krp_next) { 1581091d81d1SSam Leffler cap = crypto_checkdriver(krp->krp_hid); 15826810ad6fSSam Leffler if (cap == NULL || cap->cc_dev == NULL) { 15836810ad6fSSam Leffler /* 15846810ad6fSSam Leffler * Operation needs to be migrated; invalidate 15856810ad6fSSam Leffler * the assigned device so it will reselect a 15866810ad6fSSam Leffler * new one below. Propagate the original 15876810ad6fSSam Leffler * crid selection flags if supplied. 15886810ad6fSSam Leffler */ 15896810ad6fSSam Leffler krp->krp_hid = krp->krp_crid & 15906810ad6fSSam Leffler (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE); 15916810ad6fSSam Leffler if (krp->krp_hid == 0) 15926810ad6fSSam Leffler krp->krp_hid = 15936810ad6fSSam Leffler CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE; 1594091d81d1SSam Leffler break; 1595091d81d1SSam Leffler } 1596091d81d1SSam Leffler if (!cap->cc_kqblocked) 1597091d81d1SSam Leffler break; 1598091d81d1SSam Leffler } 1599091d81d1SSam Leffler if (krp != NULL) { 1600091d81d1SSam Leffler TAILQ_REMOVE(&crp_kq, krp, krp_next); 16016810ad6fSSam Leffler result = crypto_kinvoke(krp, krp->krp_hid); 1602091d81d1SSam Leffler if (result == ERESTART) { 1603091d81d1SSam Leffler /* 1604091d81d1SSam Leffler * The driver ran out of resources; mark the 1605091d81d1SSam Leffler * driver ``blocked'' for cryptkops and put 1606091d81d1SSam Leffler * the request back in the queue. It would be 1607091d81d1SSam Leffler * best to put the request back where we got 1608091d81d1SSam Leffler * it but that's hard so for now we put it 1609091d81d1SSam Leffler * at the front.
This should be ok; putting 1610091d81d1SSam Leffler * it at the end does not work. 1611091d81d1SSam Leffler */ 1612091d81d1SSam Leffler /* XXX validate sid again? */ 1613091d81d1SSam Leffler crypto_drivers[krp->krp_hid].cc_kqblocked = 1; 1614091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); 16157d1853eeSSam Leffler cryptostats.cs_kblocks++; 1616091d81d1SSam Leffler } 1617091d81d1SSam Leffler } 1618091d81d1SSam Leffler 16191a91ccccSSam Leffler if (submit == NULL && krp == NULL) { 1620091d81d1SSam Leffler /* 1621091d81d1SSam Leffler * Nothing more to be processed. Sleep until we're 1622091d81d1SSam Leffler * woken because there are more ops to process. 1623091d81d1SSam Leffler * This happens either by submission or by a driver 1624091d81d1SSam Leffler * becoming unblocked and notifying us through 1625091d81d1SSam Leffler * crypto_unblock. Note that when we wakeup we 1626091d81d1SSam Leffler * start processing each queue again from the 1627091d81d1SSam Leffler * front. It's not clear that it's important to 1628091d81d1SSam Leffler * preserve this ordering since ops may finish 1629091d81d1SSam Leffler * out of order if dispatched to different devices 1630091d81d1SSam Leffler * and some become blocked while others do not. 1631091d81d1SSam Leffler */ 16323a865c82SPawel Jakub Dawidek crp_sleep = 1; 16331a91ccccSSam Leffler msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); 16343a865c82SPawel Jakub Dawidek crp_sleep = 0; 163551e45326SSam Leffler if (cryptoproc == NULL) 163651e45326SSam Leffler break; 16377d1853eeSSam Leffler cryptostats.cs_intrs++; 1638091d81d1SSam Leffler } 1639091d81d1SSam Leffler } 164051e45326SSam Leffler CRYPTO_Q_UNLOCK(); 16411a91ccccSSam Leffler 164251e45326SSam Leffler crypto_finis(&crp_q); 16431a91ccccSSam Leffler } 16441a91ccccSSam Leffler 16451a91ccccSSam Leffler /* 16461a91ccccSSam Leffler * Crypto returns thread, does callbacks for processed crypto requests. 16471a91ccccSSam Leffler * Callbacks are done here, rather than in the crypto drivers, because 16481a91ccccSSam Leffler * callbacks typically are expensive and would slow interrupt handling. 
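 * Entries on crp_ordered_ret_q are completed strictly in crp_seq
 * order; otherwise requests are taken from crp_ret_q and crp_ret_kq,
 * and the callbacks are run with the worker lock dropped.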
16491a91ccccSSam Leffler */ 16501a91ccccSSam Leffler static void 165139bbca6fSFabien Thomas crypto_ret_proc(struct crypto_ret_worker *ret_worker) 16521a91ccccSSam Leffler { 16531a91ccccSSam Leffler struct cryptop *crpt; 16541a91ccccSSam Leffler struct cryptkop *krpt; 16551a91ccccSSam Leffler 165639bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 16571a91ccccSSam Leffler for (;;) { 16581a91ccccSSam Leffler /* Harvest return q's for completed ops */ 165939bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); 166039bbca6fSFabien Thomas if (crpt != NULL) { 166139bbca6fSFabien Thomas if (crpt->crp_seq == ret_worker->reorder_cur_seq) { 166239bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); 166339bbca6fSFabien Thomas ret_worker->reorder_cur_seq++; 166439bbca6fSFabien Thomas } else { 166539bbca6fSFabien Thomas crpt = NULL; 166639bbca6fSFabien Thomas } 166739bbca6fSFabien Thomas } 16681a91ccccSSam Leffler 166939bbca6fSFabien Thomas if (crpt == NULL) { 167039bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); 167139bbca6fSFabien Thomas if (crpt != NULL) 167239bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); 167339bbca6fSFabien Thomas } 167439bbca6fSFabien Thomas 167539bbca6fSFabien Thomas krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); 16761a91ccccSSam Leffler if (krpt != NULL) 167739bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); 16781a91ccccSSam Leffler 16791a91ccccSSam Leffler if (crpt != NULL || krpt != NULL) { 168039bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 16811a91ccccSSam Leffler /* 16821a91ccccSSam Leffler * Run callbacks unlocked. 16831a91ccccSSam Leffler */ 16847d1853eeSSam Leffler if (crpt != NULL) { 16857d1853eeSSam Leffler #ifdef CRYPTO_TIMING 16867d1853eeSSam Leffler if (crypto_timing) { 16877d1853eeSSam Leffler /* 16887d1853eeSSam Leffler * NB: We must copy the timestamp before 16897d1853eeSSam Leffler * doing the callback as the cryptop is 16907d1853eeSSam Leffler * likely to be reclaimed. 16917d1853eeSSam Leffler */ 16927d1853eeSSam Leffler struct bintime t = crpt->crp_tstamp; 16937d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_cb, &t); 16941a91ccccSSam Leffler crpt->crp_callback(crpt); 16957d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_finis, &t); 16967d1853eeSSam Leffler } else 16977d1853eeSSam Leffler #endif 16987d1853eeSSam Leffler crpt->crp_callback(crpt); 16997d1853eeSSam Leffler } 17001a91ccccSSam Leffler if (krpt != NULL) 17011a91ccccSSam Leffler krpt->krp_callback(krpt); 170239bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 17031a91ccccSSam Leffler } else { 17041a91ccccSSam Leffler /* 17051a91ccccSSam Leffler * Nothing more to be processed. Sleep until we're 17061a91ccccSSam Leffler * woken because there are more returns to process. 
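 * crypto_done() and crypto_kdone() wake this channel (crp_ret_q is
 * the shared wait channel); the loop exits once
 * ret_worker->cryptoretproc has been cleared.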
17071a91ccccSSam Leffler */ 170839bbca6fSFabien Thomas msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, 17091a91ccccSSam Leffler "crypto_ret_wait", 0); 171039bbca6fSFabien Thomas if (ret_worker->cryptoretproc == NULL) 171151e45326SSam Leffler break; 17127d1853eeSSam Leffler cryptostats.cs_rets++; 17131a91ccccSSam Leffler } 17141a91ccccSSam Leffler } 171539bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 171651e45326SSam Leffler 171739bbca6fSFabien Thomas crypto_finis(&ret_worker->crp_ret_q); 17181a91ccccSSam Leffler } 17196810ad6fSSam Leffler 17206810ad6fSSam Leffler #ifdef DDB 17216810ad6fSSam Leffler static void 17226810ad6fSSam Leffler db_show_drivers(void) 17236810ad6fSSam Leffler { 17246810ad6fSSam Leffler int hid; 17256810ad6fSSam Leffler 17266810ad6fSSam Leffler db_printf("%12s %4s %4s %8s %2s %2s\n" 17276810ad6fSSam Leffler , "Device" 17286810ad6fSSam Leffler , "Ses" 17296810ad6fSSam Leffler , "Kops" 17306810ad6fSSam Leffler , "Flags" 17316810ad6fSSam Leffler , "QB" 17326810ad6fSSam Leffler , "KB" 17336810ad6fSSam Leffler ); 17346810ad6fSSam Leffler for (hid = 0; hid < crypto_drivers_num; hid++) { 17356810ad6fSSam Leffler const struct cryptocap *cap = &crypto_drivers[hid]; 17366810ad6fSSam Leffler if (cap->cc_dev == NULL) 17376810ad6fSSam Leffler continue; 17386810ad6fSSam Leffler db_printf("%-12s %4u %4u %08x %2u %2u\n" 17396810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 17406810ad6fSSam Leffler , cap->cc_sessions 17416810ad6fSSam Leffler , cap->cc_koperations 17426810ad6fSSam Leffler , cap->cc_flags 17436810ad6fSSam Leffler , cap->cc_qblocked 17446810ad6fSSam Leffler , cap->cc_kqblocked 17456810ad6fSSam Leffler ); 17466810ad6fSSam Leffler } 17476810ad6fSSam Leffler } 17486810ad6fSSam Leffler 17496810ad6fSSam Leffler DB_SHOW_COMMAND(crypto, db_show_crypto) 17506810ad6fSSam Leffler { 17516810ad6fSSam Leffler struct cryptop *crp; 175239bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 17536810ad6fSSam Leffler 17546810ad6fSSam Leffler db_show_drivers(); 17556810ad6fSSam Leffler db_printf("\n"); 17566810ad6fSSam Leffler 17576810ad6fSSam Leffler db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", 17586810ad6fSSam Leffler "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", 17596810ad6fSSam Leffler "Desc", "Callback"); 17606810ad6fSSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 17616810ad6fSSam Leffler db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" 1762*1b0909d5SConrad Meyer , (int) crypto_ses2hid(crp->crp_session) 1763*1b0909d5SConrad Meyer , (int) crypto_ses2caps(crp->crp_session) 17646810ad6fSSam Leffler , crp->crp_ilen, crp->crp_olen 17656810ad6fSSam Leffler , crp->crp_etype 17666810ad6fSSam Leffler , crp->crp_flags 17676810ad6fSSam Leffler , crp->crp_desc 17686810ad6fSSam Leffler , crp->crp_callback 17696810ad6fSSam Leffler ); 17706810ad6fSSam Leffler } 177139bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 177239bbca6fSFabien Thomas db_printf("\n%8s %4s %4s %4s %8s\n", 177339bbca6fSFabien Thomas "ret_worker", "HID", "Etype", "Flags", "Callback"); 177439bbca6fSFabien Thomas if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 177539bbca6fSFabien Thomas TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { 177639bbca6fSFabien Thomas db_printf("%8td %4u %4u %04x %8p\n" 177739bbca6fSFabien Thomas , CRYPTO_RETW_ID(ret_worker) 1778*1b0909d5SConrad Meyer , (int) crypto_ses2hid(crp->crp_session) 17796810ad6fSSam Leffler , crp->crp_etype 17806810ad6fSSam Leffler , crp->crp_flags 17816810ad6fSSam Leffler , crp->crp_callback 17826810ad6fSSam Leffler ); 
17836810ad6fSSam Leffler } 17846810ad6fSSam Leffler } 17856810ad6fSSam Leffler } 178639bbca6fSFabien Thomas } 17876810ad6fSSam Leffler 17886810ad6fSSam Leffler DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) 17896810ad6fSSam Leffler { 17906810ad6fSSam Leffler struct cryptkop *krp; 179139bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 17926810ad6fSSam Leffler 17936810ad6fSSam Leffler db_show_drivers(); 17946810ad6fSSam Leffler db_printf("\n"); 17956810ad6fSSam Leffler 17966810ad6fSSam Leffler db_printf("%4s %5s %4s %4s %8s %4s %8s\n", 17976810ad6fSSam Leffler "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); 17986810ad6fSSam Leffler TAILQ_FOREACH(krp, &crp_kq, krp_next) { 17996810ad6fSSam Leffler db_printf("%4u %5u %4u %4u %08x %4u %8p\n" 18006810ad6fSSam Leffler , krp->krp_op 18016810ad6fSSam Leffler , krp->krp_status 18026810ad6fSSam Leffler , krp->krp_iparams, krp->krp_oparams 18036810ad6fSSam Leffler , krp->krp_crid, krp->krp_hid 18046810ad6fSSam Leffler , krp->krp_callback 18056810ad6fSSam Leffler ); 18066810ad6fSSam Leffler } 180739bbca6fSFabien Thomas 180839bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(0); 180939bbca6fSFabien Thomas if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 18106810ad6fSSam Leffler db_printf("%4s %5s %8s %4s %8s\n", 18116810ad6fSSam Leffler "Op", "Status", "CRID", "HID", "Callback"); 181239bbca6fSFabien Thomas TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { 18136810ad6fSSam Leffler db_printf("%4u %5u %08x %4u %8p\n" 18146810ad6fSSam Leffler , krp->krp_op 18156810ad6fSSam Leffler , krp->krp_status 18166810ad6fSSam Leffler , krp->krp_crid, krp->krp_hid 18176810ad6fSSam Leffler , krp->krp_callback 18186810ad6fSSam Leffler ); 18196810ad6fSSam Leffler } 18206810ad6fSSam Leffler } 18216810ad6fSSam Leffler } 18226810ad6fSSam Leffler #endif 18236810ad6fSSam Leffler 18246810ad6fSSam Leffler int crypto_modevent(module_t mod, int type, void *unused); 18256810ad6fSSam Leffler 18266810ad6fSSam Leffler /* 18276810ad6fSSam Leffler * Initialization code, both for static and dynamic loading. 18286810ad6fSSam Leffler * Note this is not invoked with the usual MODULE_DECLARE 18296810ad6fSSam Leffler * mechanism but instead is listed as a dependency by the 18306810ad6fSSam Leffler * cryptosoft driver. This guarantees proper ordering of 18316810ad6fSSam Leffler * calls on module load/unload. 18326810ad6fSSam Leffler */ 18336810ad6fSSam Leffler int 18346810ad6fSSam Leffler crypto_modevent(module_t mod, int type, void *unused) 18356810ad6fSSam Leffler { 18366810ad6fSSam Leffler int error = EINVAL; 18376810ad6fSSam Leffler 18386810ad6fSSam Leffler switch (type) { 18396810ad6fSSam Leffler case MOD_LOAD: 18406810ad6fSSam Leffler error = crypto_init(); 18416810ad6fSSam Leffler if (error == 0 && bootverbose) 18426810ad6fSSam Leffler printf("crypto: <crypto core>\n"); 18436810ad6fSSam Leffler break; 18446810ad6fSSam Leffler case MOD_UNLOAD: 18456810ad6fSSam Leffler /*XXX disallow if active sessions */ 18466810ad6fSSam Leffler error = 0; 18476810ad6fSSam Leffler crypto_destroy(); 18486810ad6fSSam Leffler return 0; 18496810ad6fSSam Leffler } 18506810ad6fSSam Leffler return error; 18516810ad6fSSam Leffler } 18526810ad6fSSam Leffler MODULE_VERSION(crypto, 1); 18536810ad6fSSam Leffler MODULE_DEPEND(crypto, zlib, 1, 1, 1); 1854