/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/errno.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/modhash.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>

/* Cryptographic mechanisms tables and their access functions */

/*
 * Internal numbers assigned to mechanisms are coded as follows:
 *
 * +----------------+----------------+
 * | mech. class    |   mech. index  |
 * <--- 32-bits --->+<--- 32-bits --->
 *
 * The mech_class identifies the table the mechanism belongs to.
 * mech_index is the index for that mechanism in the table.
 * A mechanism belongs to exactly 1 table.
 * The tables are:
 * . digest_mechs_tab[] for the msg digest mechs.
 * . cipher_mechs_tab[] for encrypt/decrypt and wrap/unwrap mechs.
 * . mac_mechs_tab[] for MAC mechs.
 * . sign_mechs_tab[] for sign & verify mechs.
 * . keyops_mechs_tab[] for key/key pair generation, and key derivation.
 * . misc_mechs_tab[] for mechs that don't belong to any of the above.
 *
 * There are no holes in the tables.
 */
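
/*
 * Illustrative sketch (not part of the build): composing and decomposing
 * an internal mechanism number with the KCF_MECHID(), KCF_MECH2CLASS()
 * and KCF_MECH2INDEX() macros used throughout this file:
 *
 *        crypto_mech_type_t mt = KCF_MECHID(KCF_DIGEST_CLASS, 1);
 *
 *        ASSERT(KCF_MECH2CLASS(mt) == KCF_DIGEST_CLASS);
 *        ASSERT(KCF_MECH2INDEX(mt) == 1);
 *        ASSERT(&kcf_mech_tabs_tab[KCF_MECH2CLASS(mt)].
 *            met_tab[KCF_MECH2INDEX(mt)] == &kcf_digest_mechs_tab[1]);
 */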

/*
 * Locking conventions:
 * --------------------
 * A global mutex, kcf_mech_tabs_lock, serializes writes to the
 * mechanism table via kcf_create_mech_entry().
 *
 * A mutex is associated with every entry of the tables.
 * The mutex is acquired whenever the entry is accessed for
 * 1) retrieving the mech_id (comparing the mech name),
 * 2) finding a provider for an xxx_init() or atomic operation, or
 * 3) altering the mech_entry to add or remove a provider.
 *
 * In 2), after a provider is chosen, its prov_desc is held and the
 * entry's mutex must be dropped. The provider's working function (SPI) is
 * called outside the mech_entry's mutex.
 *
 * The number of providers for a particular mechanism is not expected to be
 * large enough to justify the cost of using rwlocks, so the per-mechanism
 * entry mutex won't be very *hot*.
 *
 * When both kcf_mech_tabs_lock and a mech_entry mutex need to be held,
 * kcf_mech_tabs_lock must always be acquired first.
 */
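
/*
 * A minimal sketch of the ordering rule above (illustrative only), as
 * followed by kcf_create_mech_entry():
 *
 *        mutex_enter(&kcf_mech_tabs_lock);
 *        ME_MUTEXES_ENTER_ALL();         (per-entry locks, defined below)
 *        ... examine or update the mech_entry ...
 *        ME_MUTEXES_EXIT_ALL();
 *        mutex_exit(&kcf_mech_tabs_lock);
 *
 * Taking the locks in the opposite order could deadlock against that
 * routine.
 */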

/* Mechanisms tables */


/* RFE 4687834 Will deal with the extensibility of these tables later */

kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];
kcf_mech_entry_t kcf_sign_mechs_tab[KCF_MAXSIGN];
kcf_mech_entry_t kcf_keyops_mechs_tab[KCF_MAXKEYOPS];
kcf_mech_entry_t kcf_misc_mechs_tab[KCF_MAXMISC];

kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
        {0, NULL},                              /* No class zero */
        {KCF_MAXDIGEST, kcf_digest_mechs_tab},
        {KCF_MAXCIPHER, kcf_cipher_mechs_tab},
        {KCF_MAXMAC, kcf_mac_mechs_tab},
        {KCF_MAXSIGN, kcf_sign_mechs_tab},
        {KCF_MAXKEYOPS, kcf_keyops_mechs_tab},
        {KCF_MAXMISC, kcf_misc_mechs_tab}
};

/*
 * Protects fields in kcf_mech_entry. This is an array
 * of locks indexed by the cpuid. A reader needs to hold
 * a single lock while a writer needs to hold all locks.
 * krwlock_t is not an option here because the hold time
 * is very small for these locks.
 */
kcf_lock_withpad_t *me_mutexes;

#define ME_MUTEXES_ENTER_ALL()  \
        for (int i = 0; i < max_ncpus; i++)     \
                mutex_enter(&me_mutexes[i].kl_lock);

#define ME_MUTEXES_EXIT_ALL()   \
        for (int i = 0; i < max_ncpus; i++)     \
                mutex_exit(&me_mutexes[i].kl_lock);
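
/*
 * Illustrative sketch (not part of the build): a reader takes only the
 * lock of the CPU it is running on, as crypto_mech2id_common() does below,
 * while a writer takes every lock through the macros above.
 *
 *        kcf_lock_withpad_t *mp = &me_mutexes[CPU_SEQID];
 *
 *        mutex_enter(&mp->kl_lock);
 *        ... read fields of the kcf_mech_entry_t ...
 *        mutex_exit(&mp->kl_lock);
 */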

/*
 * Per-algorithm internal thresholds for the minimum input size before
 * offloading to a hardware provider.
 * Dispatching a crypto operation to a hardware provider entails paying the
 * cost of an additional context switch. Measurements with the Sun
 * Accelerator 4000 show that 512-byte jobs or smaller are better handled
 * in software. There is room for refinement here.
 */
int kcf_md5_threshold = 512;
int kcf_sha1_threshold = 512;
int kcf_des_threshold = 512;
int kcf_des3_threshold = 512;
int kcf_aes_threshold = 512;
int kcf_bf_threshold = 512;
int kcf_rc4_threshold = 512;
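
/*
 * Since the thresholds are plain global variables, they can be tuned
 * without a rebuild; a sketch, assuming the usual module:variable
 * /etc/system syntax for the kcf module:
 *
 *        set kcf:kcf_aes_threshold = 1024
 */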

kmutex_t kcf_mech_tabs_lock;
static uint32_t kcf_gen_swprov = 0;

int kcf_mech_hash_size = 256;
mod_hash_t *kcf_mech_hash;      /* mech name to id hash */

static crypto_mech_type_t
kcf_mech_hash_find(char *mechname)
{
        mod_hash_val_t hv;
        crypto_mech_type_t mt;

        mt = CRYPTO_MECH_INVALID;
        if (mod_hash_find(kcf_mech_hash, (mod_hash_key_t)mechname, &hv) == 0) {
                mt = *(crypto_mech_type_t *)hv;
                ASSERT(mt != CRYPTO_MECH_INVALID);
        }

        return (mt);
}

/*
 * kcf_init_mech_tabs()
 *
 * Called by the misc/kcf module's _init() routine to initialize the tables
 * of mech_entry's.
 */
void
kcf_init_mech_tabs()
{
        int i, max;
        kcf_ops_class_t class;
        kcf_mech_entry_t *me_tab;

        /* Initialize the mutex locks. */

        mutex_init(&kcf_mech_tabs_lock, NULL, MUTEX_DEFAULT, NULL);

        /* Then the pre-defined mechanism entries */

        /* Two digests */
        (void) strncpy(kcf_digest_mechs_tab[0].me_name, SUN_CKM_MD5,
            CRYPTO_MAX_MECH_NAME);
        kcf_digest_mechs_tab[0].me_threshold = kcf_md5_threshold;

        (void) strncpy(kcf_digest_mechs_tab[1].me_name, SUN_CKM_SHA1,
            CRYPTO_MAX_MECH_NAME);
        kcf_digest_mechs_tab[1].me_threshold = kcf_sha1_threshold;

        /* The symmetric ciphers in various modes */
        (void) strncpy(kcf_cipher_mechs_tab[0].me_name, SUN_CKM_DES_CBC,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[0].me_threshold = kcf_des_threshold;

        (void) strncpy(kcf_cipher_mechs_tab[1].me_name, SUN_CKM_DES3_CBC,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[1].me_threshold = kcf_des3_threshold;

        (void) strncpy(kcf_cipher_mechs_tab[2].me_name, SUN_CKM_DES_ECB,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[2].me_threshold = kcf_des_threshold;

        (void) strncpy(kcf_cipher_mechs_tab[3].me_name, SUN_CKM_DES3_ECB,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[3].me_threshold = kcf_des3_threshold;

        (void) strncpy(kcf_cipher_mechs_tab[4].me_name, SUN_CKM_BLOWFISH_CBC,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[4].me_threshold = kcf_bf_threshold;

        (void) strncpy(kcf_cipher_mechs_tab[5].me_name, SUN_CKM_BLOWFISH_ECB,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[5].me_threshold = kcf_bf_threshold;

        (void) strncpy(kcf_cipher_mechs_tab[6].me_name, SUN_CKM_AES_CBC,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[6].me_threshold = kcf_aes_threshold;

        (void) strncpy(kcf_cipher_mechs_tab[7].me_name, SUN_CKM_AES_ECB,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[7].me_threshold = kcf_aes_threshold;

        (void) strncpy(kcf_cipher_mechs_tab[8].me_name, SUN_CKM_RC4,
            CRYPTO_MAX_MECH_NAME);
        kcf_cipher_mechs_tab[8].me_threshold = kcf_rc4_threshold;

        /* 4 HMACs and 1 GMAC */
        (void) strncpy(kcf_mac_mechs_tab[0].me_name, SUN_CKM_MD5_HMAC,
            CRYPTO_MAX_MECH_NAME);
        kcf_mac_mechs_tab[0].me_threshold = kcf_md5_threshold;

        (void) strncpy(kcf_mac_mechs_tab[1].me_name, SUN_CKM_MD5_HMAC_GENERAL,
            CRYPTO_MAX_MECH_NAME);
        kcf_mac_mechs_tab[1].me_threshold = kcf_md5_threshold;

        (void) strncpy(kcf_mac_mechs_tab[2].me_name, SUN_CKM_SHA1_HMAC,
            CRYPTO_MAX_MECH_NAME);
        kcf_mac_mechs_tab[2].me_threshold = kcf_sha1_threshold;

        (void) strncpy(kcf_mac_mechs_tab[3].me_name, SUN_CKM_SHA1_HMAC_GENERAL,
            CRYPTO_MAX_MECH_NAME);
        kcf_mac_mechs_tab[3].me_threshold = kcf_sha1_threshold;

        (void) strncpy(kcf_mac_mechs_tab[4].me_name, SUN_CKM_AES_GMAC,
            CRYPTO_MAX_MECH_NAME);
        kcf_mac_mechs_tab[4].me_threshold = kcf_sha1_threshold;

        /* 1 random number generation pseudo mechanism */
        (void) strncpy(kcf_misc_mechs_tab[0].me_name, SUN_RANDOM,
            CRYPTO_MAX_MECH_NAME);

        kcf_mech_hash = mod_hash_create_strhash("kcf mech2id hash",
            kcf_mech_hash_size, mod_hash_null_valdtor);

        for (class = KCF_FIRST_OPSCLASS; class <= KCF_LAST_OPSCLASS; class++) {
                max = kcf_mech_tabs_tab[class].met_size;
                me_tab = kcf_mech_tabs_tab[class].met_tab;
                for (i = 0; i < max; i++) {
                        if (me_tab[i].me_name[0] != 0) {
                                me_tab[i].me_mechid = KCF_MECHID(class, i);
                                (void) mod_hash_insert(kcf_mech_hash,
                                    (mod_hash_key_t)me_tab[i].me_name,
                                    (mod_hash_val_t)&(me_tab[i].me_mechid));
                        }
                }
        }

        me_mutexes = kmem_zalloc(max_ncpus * sizeof (kcf_lock_withpad_t),
            KM_SLEEP);
        for (i = 0; i < max_ncpus; i++) {
                mutex_init(&me_mutexes[i].kl_lock, NULL, MUTEX_DEFAULT, NULL);
        }
}

/*
 * kcf_create_mech_entry()
 *
 * Arguments:
 * . The class of the mechanism.
 * . The name of the new mechanism.
 *
 * Description:
 * Creates a new mech_entry for a mechanism not yet known to the
 * framework.
 * This routine is called by kcf_add_mech_provider, which is
 * in turn invoked for each mechanism supported by a provider.
 * The 'class' argument depends on the crypto_func_group_t bitmask
 * in the registering provider's mech_info struct for this mechanism.
 * When there is ambiguity in the mapping between the crypto_func_group_t
 * and a class (dual ops, ...) the KCF_MISC_CLASS should be used.
 *
 * Context:
 * User context only.
 *
 * Returns:
 * KCF_INVALID_MECH_CLASS or KCF_INVALID_MECH_NAME if the class or
 * the mechname is bogus.
 * KCF_MECH_TAB_FULL when there is no room left in the mech. tabs.
 * KCF_SUCCESS otherwise.
 */
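/*
 * Illustrative sketch (hypothetical mechanism name): registering a cipher
 * mechanism that the framework has not seen before boils down to
 *
 *        error = kcf_create_mech_entry(KCF_CIPHER_CLASS, "CKM_EXAMPLE_CBC");
 *
 * which claims the first free slot of kcf_cipher_mechs_tab[] and inserts
 * the name-to-id mapping into kcf_mech_hash.
 */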
static int
kcf_create_mech_entry(kcf_ops_class_t class, char *mechname)
{
        crypto_mech_type_t mt;
        kcf_mech_entry_t *me_tab;
        int i = 0, size;

        if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS))
                return (KCF_INVALID_MECH_CLASS);

        if ((mechname == NULL) || (mechname[0] == 0))
                return (KCF_INVALID_MECH_NAME);
        /*
         * First check if the mechanism is already in one of the tables.
         * The mech_entry could be in another class.
         */
        mutex_enter(&kcf_mech_tabs_lock);
        mt = kcf_mech_hash_find(mechname);
        if (mt != CRYPTO_MECH_INVALID) {
                /* Nothing to do, regardless of the suggested class. */
                mutex_exit(&kcf_mech_tabs_lock);
                return (KCF_SUCCESS);
        }
        /* Now take the next unused mech entry in the class's tab */
        me_tab = kcf_mech_tabs_tab[class].met_tab;
        size = kcf_mech_tabs_tab[class].met_size;

        while (i < size) {
                ME_MUTEXES_ENTER_ALL();
                if (me_tab[i].me_name[0] == 0) {
                        /* Found an empty spot */
                        (void) strncpy(me_tab[i].me_name, mechname,
                            CRYPTO_MAX_MECH_NAME);
                        me_tab[i].me_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
                        me_tab[i].me_mechid = KCF_MECHID(class, i);
                        /*
                         * No a-priori information about the new mechanism, so
                         * the threshold is set to zero.
                         */
                        me_tab[i].me_threshold = 0;

                        ME_MUTEXES_EXIT_ALL();
                        /* Add the new mechanism to the hash table */
                        (void) mod_hash_insert(kcf_mech_hash,
                            (mod_hash_key_t)me_tab[i].me_name,
                            (mod_hash_val_t)&(me_tab[i].me_mechid));
                        break;
                }
                ME_MUTEXES_EXIT_ALL();
                i++;
        }

        mutex_exit(&kcf_mech_tabs_lock);

        if (i == size) {
                return (KCF_MECH_TAB_FULL);
        }

        return (KCF_SUCCESS);
}

/*
 * kcf_add_mech_provider()
 *
 * Arguments:
 * . An index into the provider's mechanism array.
 * . A pointer to the provider descriptor.
 * . Storage for a pointer to the kcf_prov_mech_desc_t that was added
 *   (set to NULL if no descriptor was added).
 *
 * Description:
 * Adds a new provider of a mechanism to the mechanism's mech_entry
 * chain.
 *
 * Context:
 * User context only.
 *
 * Returns:
 * KCF_SUCCESS on success.
 * KCF_MECH_TAB_FULL otherwise.
 */
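/*
 * A minimal sketch of how a registration path uses this routine
 * (illustrative only; the actual caller is the provider registration
 * code, which iterates over the provider's mechanism list):
 *
 *        kcf_prov_mech_desc_t *pmd;
 *
 *        for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
 *                if (kcf_add_mech_provider(i, prov_desc, &pmd) != KCF_SUCCESS)
 *                        break;
 *        }
 */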
int
kcf_add_mech_provider(short mech_indx,
    kcf_provider_desc_t *prov_desc, kcf_prov_mech_desc_t **pmdpp)
{
        int error;
        kcf_mech_entry_t *mech_entry;
        crypto_mech_info_t *mech_info;
        crypto_mech_type_t kcf_mech_type, mt;
        kcf_prov_mech_desc_t *prov_mech, *prov_mech2;
        crypto_func_group_t simple_fg_mask, dual_fg_mask;
        crypto_mech_info_t *dmi;
        crypto_mech_info_list_t *mil, *mil2;
        kcf_mech_entry_t *me;
        int i;

        ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

        mech_info = &prov_desc->pd_mechanisms[mech_indx];
        /*
         * Do not use the provider for the mechanism if
         * policy does not allow it.
         */
        if (is_mech_disabled(prov_desc, mech_info->cm_mech_name)) {
                *pmdpp = NULL;
                return (KCF_SUCCESS);
        }

        /*
         * A mechanism belongs to exactly one mechanism table.
         * Find the class corresponding to the function group flag of
         * the mechanism.
         */
        kcf_mech_type = kcf_mech_hash_find(mech_info->cm_mech_name);
        if (kcf_mech_type == CRYPTO_MECH_INVALID) {
                crypto_func_group_t fg = mech_info->cm_func_group_mask;
                kcf_ops_class_t class;

                if (fg & CRYPTO_FG_DIGEST || fg & CRYPTO_FG_DIGEST_ATOMIC)
                        class = KCF_DIGEST_CLASS;
                else if (fg & CRYPTO_FG_ENCRYPT || fg & CRYPTO_FG_DECRYPT ||
                    fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
                    fg & CRYPTO_FG_DECRYPT_ATOMIC)
                        class = KCF_CIPHER_CLASS;
                else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)
                        class = KCF_MAC_CLASS;
                else if (fg & CRYPTO_FG_SIGN || fg & CRYPTO_FG_VERIFY ||
                    fg & CRYPTO_FG_SIGN_ATOMIC ||
                    fg & CRYPTO_FG_VERIFY_ATOMIC ||
                    fg & CRYPTO_FG_SIGN_RECOVER ||
                    fg & CRYPTO_FG_VERIFY_RECOVER)
                        class = KCF_SIGN_CLASS;
                else if (fg & CRYPTO_FG_GENERATE ||
                    fg & CRYPTO_FG_GENERATE_KEY_PAIR ||
                    fg & CRYPTO_FG_WRAP || fg & CRYPTO_FG_UNWRAP ||
                    fg & CRYPTO_FG_DERIVE)
                        class = KCF_KEYOPS_CLASS;
                else
                        class = KCF_MISC_CLASS;

                /*
                 * Attempt to create a new mech_entry for the specified
                 * mechanism. kcf_create_mech_entry() can handle the case
                 * where such an entry already exists.
                 */
                if ((error = kcf_create_mech_entry(class,
                    mech_info->cm_mech_name)) != KCF_SUCCESS) {
                        return (error);
                }
                /* get the KCF mech type that was assigned to the mechanism */
                kcf_mech_type = kcf_mech_hash_find(mech_info->cm_mech_name);
                ASSERT(kcf_mech_type != CRYPTO_MECH_INVALID);
        }

        error = kcf_get_mech_entry(kcf_mech_type, &mech_entry);
        ASSERT(error == KCF_SUCCESS);

        /* allocate and initialize new kcf_prov_mech_desc */
        prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
        bcopy(mech_info, &prov_mech->pm_mech_info, sizeof (crypto_mech_info_t));
        prov_mech->pm_prov_desc = prov_desc;
        prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
            [KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;

        KCF_PROV_REFHOLD(prov_desc);

        dual_fg_mask = mech_info->cm_func_group_mask & CRYPTO_FG_DUAL_MASK;

        if (dual_fg_mask == ((crypto_func_group_t)0))
                goto add_entry;

        simple_fg_mask = (mech_info->cm_func_group_mask &
            CRYPTO_FG_SIMPLEOP_MASK) | CRYPTO_FG_RANDOM;

        for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
                dmi = &prov_desc->pd_mechanisms[i];

                /* skip self */
                if (dmi->cm_mech_number == mech_info->cm_mech_number)
                        continue;

                /* skip if policy doesn't allow mechanism */
                if (is_mech_disabled(prov_desc, dmi->cm_mech_name))
                        continue;

                /* skip if not a dual operation mechanism */
                if (!(dmi->cm_func_group_mask & dual_fg_mask) ||
                    (dmi->cm_func_group_mask & simple_fg_mask))
                        continue;

                mt = kcf_mech_hash_find(dmi->cm_mech_name);
                if (mt == CRYPTO_MECH_INVALID)
                        continue;

                if (kcf_get_mech_entry(mt, &me) != KCF_SUCCESS)
                        continue;

                mil = kmem_zalloc(sizeof (*mil), KM_SLEEP);
                mil2 = kmem_zalloc(sizeof (*mil2), KM_SLEEP);

                /*
                 * Ignore hard-coded entries in the mech table
                 * if the provider hasn't registered.
                 */
                ME_MUTEXES_ENTER_ALL();
                if (me->me_hw_prov_chain == NULL && me->me_sw_prov == NULL) {
                        ME_MUTEXES_EXIT_ALL();
                        kmem_free(mil, sizeof (*mil));
                        kmem_free(mil2, sizeof (*mil2));
                        continue;
                }

                /*
                 * Add other dual mechanisms that have registered
                 * with the framework to this mechanism's
                 * cross-reference list.
                 */
                mil->ml_mech_info = *dmi; /* struct assignment */
                mil->ml_kcf_mechid = mt;

                /* add to head of list */
                mil->ml_next = prov_mech->pm_mi_list;
                prov_mech->pm_mi_list = mil;

                if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
                        prov_mech2 = me->me_hw_prov_chain;
                else
                        prov_mech2 = me->me_sw_prov;

                if (prov_mech2 == NULL) {
                        kmem_free(mil2, sizeof (*mil2));
                        ME_MUTEXES_EXIT_ALL();
                        continue;
                }

                /*
                 * Update all other cross-reference lists by
                 * adding this new mechanism.
                 */
                while (prov_mech2 != NULL) {
                        if (prov_mech2->pm_prov_desc == prov_desc) {
                                /* struct assignment */
                                mil2->ml_mech_info = *mech_info;
                                mil2->ml_kcf_mechid = kcf_mech_type;

                                /* add to head of list */
                                mil2->ml_next = prov_mech2->pm_mi_list;
                                prov_mech2->pm_mi_list = mil2;
                                break;
                        }
                        prov_mech2 = prov_mech2->pm_next;
                }
                if (prov_mech2 == NULL)
                        kmem_free(mil2, sizeof (*mil2));

                ME_MUTEXES_EXIT_ALL();
        }

add_entry:
        /*
         * Add the new kcf_prov_mech_desc at the front of the HW providers
         * chain, or make it the SW provider for this mechanism.
         */
        switch (prov_desc->pd_prov_type) {

        case CRYPTO_HW_PROVIDER:
                ME_MUTEXES_ENTER_ALL();
                prov_mech->pm_me = mech_entry;
                prov_mech->pm_next = mech_entry->me_hw_prov_chain;
                mech_entry->me_hw_prov_chain = prov_mech;
                mech_entry->me_num_hwprov++;
                ME_MUTEXES_EXIT_ALL();
                break;

        case CRYPTO_SW_PROVIDER:
                ME_MUTEXES_ENTER_ALL();
                if (mech_entry->me_sw_prov != NULL) {
                        /*
                         * There is already a SW provider for this mechanism.
                         * Since we allow only one SW provider per mechanism,
                         * report this condition.
                         */
                        cmn_err(CE_WARN, "The cryptographic software provider "
                            "\"%s\" will not be used for %s. The provider "
                            "\"%s\" will be used for this mechanism "
                            "instead.", prov_desc->pd_description,
                            mech_info->cm_mech_name,
                            mech_entry->me_sw_prov->pm_prov_desc->
                            pd_description);
                        KCF_PROV_REFRELE(prov_desc);
                        kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
                        prov_mech = NULL;
                } else {
                        /*
                         * Set the provider as the software provider for
                         * this mechanism.
                         */
                        mech_entry->me_sw_prov = prov_mech;

                        /* We'll wrap around after 4 billion registrations! */
                        mech_entry->me_gen_swprov = kcf_gen_swprov++;
                }
                ME_MUTEXES_EXIT_ALL();
                break;
        }

        *pmdpp = prov_mech;

        return (KCF_SUCCESS);
}

/*
 * kcf_remove_mech_provider()
 *
 * Arguments:
 * . mech_name: the name of the mechanism.
 * . prov_desc: the provider descriptor.
 *
 * Description:
 * Removes a provider from the chain of provider descriptors.
 * The provider is made unavailable to kernel consumers for the specified
 * mechanism.
 *
 * Context:
 * User context only.
 */
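/*
 * A minimal sketch of the corresponding unregistration path (illustrative
 * only; the actual caller walks the provider's mechanism list):
 *
 *        for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
 *                kcf_remove_mech_provider(
 *                    prov_desc->pd_mechanisms[i].cm_mech_name, prov_desc);
 *        }
 */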
void
kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
{
        crypto_mech_type_t mech_type;
        kcf_prov_mech_desc_t *prov_mech, *prov_chain;
        kcf_prov_mech_desc_t **prev_entry_next;
        kcf_mech_entry_t *mech_entry;
        crypto_mech_info_list_t *mil, *mil2, *next, **prev_next;

        ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

        /* get the KCF mech type that was assigned to the mechanism */
        if ((mech_type = kcf_mech_hash_find(mech_name)) ==
            CRYPTO_MECH_INVALID) {
                /*
                 * Provider was not allowed for this mech due to policy or
                 * configuration.
                 */
                return;
        }

        /* get a ptr to the mech_entry that was created */
        if (kcf_get_mech_entry(mech_type, &mech_entry) != KCF_SUCCESS) {
                /*
                 * Provider was not allowed for this mech due to policy or
                 * configuration.
                 */
                return;
        }

        ME_MUTEXES_ENTER_ALL();

        switch (prov_desc->pd_prov_type) {

        case CRYPTO_HW_PROVIDER:
                /* find the provider in the mech_entry chain */
                prev_entry_next = &mech_entry->me_hw_prov_chain;
                prov_mech = mech_entry->me_hw_prov_chain;
                while (prov_mech != NULL &&
                    prov_mech->pm_prov_desc != prov_desc) {
                        prev_entry_next = &prov_mech->pm_next;
                        prov_mech = prov_mech->pm_next;
                }

                if (prov_mech == NULL) {
                        /* entry not found, simply return */
                        ME_MUTEXES_EXIT_ALL();
                        return;
                }

                /* remove provider entry from mech_entry chain */
                *prev_entry_next = prov_mech->pm_next;
                ASSERT(mech_entry->me_num_hwprov > 0);
                mech_entry->me_num_hwprov--;
                break;

        case CRYPTO_SW_PROVIDER:
                if (mech_entry->me_sw_prov == NULL ||
                    mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
                        /* not the software provider for this mechanism */
                        ME_MUTEXES_EXIT_ALL();
                        return;
                }
                prov_mech = mech_entry->me_sw_prov;
                mech_entry->me_sw_prov = NULL;
                break;
        }

        ME_MUTEXES_EXIT_ALL();

        /* Free the dual ops cross-reference lists */
        mil = prov_mech->pm_mi_list;
        while (mil != NULL) {
                next = mil->ml_next;
                if (kcf_get_mech_entry(mil->ml_kcf_mechid,
                    &mech_entry) != KCF_SUCCESS) {
                        mil = next;
                        continue;
                }

                ME_MUTEXES_ENTER_ALL();
                if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
                        prov_chain = mech_entry->me_hw_prov_chain;
                else
                        prov_chain = mech_entry->me_sw_prov;

                while (prov_chain != NULL) {
                        if (prov_chain->pm_prov_desc == prov_desc) {
                                prev_next = &prov_chain->pm_mi_list;
                                mil2 = prov_chain->pm_mi_list;
                                while (mil2 != NULL &&
                                    mil2->ml_kcf_mechid != mech_type) {
                                        prev_next = &mil2->ml_next;
                                        mil2 = mil2->ml_next;
                                }
                                if (mil2 != NULL) {
                                        *prev_next = mil2->ml_next;
                                        kmem_free(mil2, sizeof (*mil2));
                                }
                                break;
                        }
                        prov_chain = prov_chain->pm_next;
                }

                ME_MUTEXES_EXIT_ALL();
                kmem_free(mil, sizeof (crypto_mech_info_list_t));
                mil = next;
        }

        /* free entry */
        KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
        kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
}

/*
 * kcf_get_mech_entry()
 *
 * Arguments:
 * . The framework mechanism type.
 * . Storage for the mechanism entry.
 *
 * Description:
 * Retrieves the mechanism entry for the given mechanism type.
 *
 * Context:
 * User and interrupt contexts.
 *
 * Returns:
 * KCF_INVALID_MECH_NUMBER if the mechanism type is out of range.
 * KCF_SUCCESS otherwise.
 */
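/*
 * Illustrative sketch (not part of the build): a typical lookup, as done
 * elsewhere in this file:
 *
 *        kcf_mech_entry_t *me;
 *
 *        if (kcf_get_mech_entry(mech_type, &me) != KCF_SUCCESS)
 *                return;
 *
 * Failure means the mechanism type decodes to an invalid class or index.
 */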
int
kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep)
{
        kcf_ops_class_t class;
        int index;
        kcf_mech_entry_tab_t *me_tab;

        ASSERT(mep != NULL);

        class = KCF_MECH2CLASS(mech_type);

        if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
                /* the caller won't need to know it's an invalid class */
                return (KCF_INVALID_MECH_NUMBER);
        }

        me_tab = &kcf_mech_tabs_tab[class];
        index = KCF_MECH2INDEX(mech_type);

        if ((index < 0) || (index >= me_tab->met_size)) {
                return (KCF_INVALID_MECH_NUMBER);
        }

        *mep = &((me_tab->met_tab)[index]);

        return (KCF_SUCCESS);
}

/*
 * Returns TRUE if the provider is usable and the MOD_NOAUTOUNLOAD flag
 * is set in the modctl structure.
 */
static boolean_t
auto_unload_flag_set(kcf_prov_mech_desc_t *pm)
{
        kcf_provider_desc_t *pd;
        struct modctl *mp;
        boolean_t ret = B_FALSE;

        if (pm != NULL) {
                pd = pm->pm_prov_desc;
                KCF_PROV_REFHOLD(pd);

                if (KCF_IS_PROV_USABLE(pd)) {
                        mp = pd->pd_mctlp;
                        if (mp->mod_loadflags & MOD_NOAUTOUNLOAD) {
                                ret = B_TRUE;
                        }
                }
                KCF_PROV_REFRELE(pd);
        }

        return (ret);
}

/*
 * Look up the hash table for an entry that matches the mechname.
 * If there are no hardware or software providers for the mechanism,
 * but there is an unloaded software provider, this routine will attempt
 * to load it.
 *
 * If the MOD_NOAUTOUNLOAD flag is not set, a software provider is
 * in constant danger of being unloaded. For consumers that call
 * crypto_mech2id() only once, the provider will not be reloaded
 * if it becomes unloaded. If a provider gets loaded elsewhere
 * without the MOD_NOAUTOUNLOAD flag being set, we set it now.
 */
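/*
 * Illustrative sketch (not part of the build): the public crypto_mech2id(9F)
 * interface is expected to build on this routine, so resolving a mechanism
 * name from a kernel consumer looks roughly like
 *
 *        crypto_mech_type_t mt;
 *
 *        mt = crypto_mech2id_common(SUN_CKM_AES_CBC, B_TRUE);
 *        if (mt == CRYPTO_MECH_INVALID)
 *                return (CRYPTO_MECHANISM_INVALID);
 */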
crypto_mech_type_t
crypto_mech2id_common(char *mechname, boolean_t load_module)
{
        crypto_mech_type_t mt;
        kcf_mech_entry_t *me;
        int i;
        kcf_ops_class_t class;
        boolean_t second_time = B_FALSE;
        boolean_t try_to_load_software_provider = B_FALSE;
        kcf_lock_withpad_t *mp;

try_again:
        mt = kcf_mech_hash_find(mechname);
        if (!load_module || second_time == B_TRUE || servicing_interrupt())
                return (mt);

        if (mt != CRYPTO_MECH_INVALID) {
                class = KCF_MECH2CLASS(mt);
                i = KCF_MECH2INDEX(mt);
                me = &(kcf_mech_tabs_tab[class].met_tab[i]);
                mp = &me_mutexes[CPU_SEQID];
                mutex_enter(&mp->kl_lock);

                if (load_module && !auto_unload_flag_set(me->me_sw_prov)) {
                        try_to_load_software_provider = B_TRUE;
                }
                mutex_exit(&mp->kl_lock);
        }

        if (mt == CRYPTO_MECH_INVALID || try_to_load_software_provider) {
                struct modctl *mcp;
                boolean_t load_again = B_FALSE;
                char *module_name;
                int module_name_size;

                /* try to find a software provider for the mechanism */
                if (get_sw_provider_for_mech(mechname, &module_name)
                    != CRYPTO_SUCCESS) {
                        /* mt may already be set for a hw provider */
                        return (mt);
                }

                module_name_size = strlen(module_name) + 1;
                if (modload("crypto", module_name) == -1 ||
                    (mcp = mod_hold_by_name(module_name)) == NULL) {
                        kmem_free(module_name, module_name_size);
                        /* mt may already be set for a hw provider */
                        return (mt);
                }

                mcp->mod_loadflags |= MOD_NOAUTOUNLOAD;

                /* memory pressure may have unloaded the module */
                if (!mcp->mod_installed)
                        load_again = B_TRUE;
                mod_release_mod(mcp);

                if (load_again)
                        (void) modload("crypto", module_name);

                kmem_free(module_name, module_name_size);

                /* mt may already be set for a hw provider */
                if (mt != CRYPTO_MECH_INVALID)
                        return (mt);

                /*
                 * Try again. Should find a software provider in the
                 * table this time around.
                 */
                second_time = B_TRUE;
                goto try_again;
        }

        return (mt);
}