/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/errno.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/modhash.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>

/* Cryptographic mechanisms tables and their access functions */

/*
 * Internal numbers assigned to mechanisms are coded as follows:
 *
 * +----------------+----------------+
 * |  mech. class   |  mech. index   |
 * <--- 32-bits --->+<--- 32-bits --->
 *
 * The mech_class identifies the table the mechanism belongs to.
 * The mech_index is the index of that mechanism within the table.
 * A mechanism belongs to exactly one table.
 * The tables are:
 * . digest_mechs_tab[] for the message digest mechs.
 * . cipher_mechs_tab[] for encrypt/decrypt and wrap/unwrap mechs.
 * . mac_mechs_tab[] for MAC mechs.
 * . sign_mechs_tab[] for sign & verify mechs.
 * . keyops_mechs_tab[] for key/key pair generation and key derivation.
 * . misc_mechs_tab[] for mechs that don't belong to any of the above.
 *
 * There are no holes in the tables.
 */
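
/*
 * For illustration, a mechanism id with the layout above would be composed
 * and decomposed roughly as follows. This is a sketch only; the real
 * KCF_MECHID(), KCF_MECH2CLASS() and KCF_MECH2INDEX() macros used later in
 * this file are defined in the KCF implementation headers.
 *
 *	mech_id = ((uint64_t)class << 32) | (uint32_t)index;
 *	class   = (kcf_ops_class_t)(mech_id >> 32);
 *	index   = (int)(mech_id & 0xFFFFFFFF);
 */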

/*
 * Locking conventions:
 * --------------------
 * A global mutex, kcf_mech_tabs_lock, serializes writes to the
 * mechanism table via kcf_create_mech_entry().
 *
 * A mutex is associated with every entry of the tables.
 * The mutex is acquired whenever the entry is accessed for
 * 1) retrieving the mech_id (comparing the mech name),
 * 2) finding a provider for an xxx_init() or atomic operation, or
 * 3) altering the mech entry to add or remove a provider.
 *
 * In 2), after a provider is chosen, its prov_desc is held and the
 * entry's mutex must be dropped. The provider's working function (SPI) is
 * called outside the mech_entry's mutex.
 *
 * The number of providers for a particular mechanism is not expected to be
 * large enough to justify the cost of using rwlocks, so the per-mechanism
 * entry mutex won't be very *hot*.
 *
 * When both kcf_mech_tabs_lock and a mech_entry mutex need to be held,
 * kcf_mech_tabs_lock must always be acquired first.
 */
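
/*
 * Lock-ordering sketch (see kcf_create_mech_entry() below for the real
 * sequence; ME_MUTEXES_ENTER_ALL()/ME_MUTEXES_EXIT_ALL() are defined
 * further down in this file):
 *
 *	mutex_enter(&kcf_mech_tabs_lock);	// global lock first
 *	ME_MUTEXES_ENTER_ALL();			// then the per-entry locks
 *	... update the mech_entry ...
 *	ME_MUTEXES_EXIT_ALL();
 *	mutex_exit(&kcf_mech_tabs_lock);
 */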

/* Mechanisms tables */


/* RFE 4687834 Will deal with the extensibility of these tables later */

kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];
kcf_mech_entry_t kcf_sign_mechs_tab[KCF_MAXSIGN];
kcf_mech_entry_t kcf_keyops_mechs_tab[KCF_MAXKEYOPS];
kcf_mech_entry_t kcf_misc_mechs_tab[KCF_MAXMISC];

kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
	{0, NULL},				/* No class zero */
	{KCF_MAXDIGEST, kcf_digest_mechs_tab},
	{KCF_MAXCIPHER, kcf_cipher_mechs_tab},
	{KCF_MAXMAC, kcf_mac_mechs_tab},
	{KCF_MAXSIGN, kcf_sign_mechs_tab},
	{KCF_MAXKEYOPS, kcf_keyops_mechs_tab},
	{KCF_MAXMISC, kcf_misc_mechs_tab}
};

/*
 * Protects fields in kcf_mech_entry. This is an array
 * of locks indexed by the cpuid. A reader needs to hold
 * a single lock while a writer needs to hold all locks.
 * krwlock_t is not an option here because the hold time
 * is very small for these locks.
 */
kcf_lock_withpad_t *me_mutexes;

#define	ME_MUTEXES_ENTER_ALL()	\
	for (int i = 0; i < max_ncpus; i++)	\
		mutex_enter(&me_mutexes[i].kl_lock);

#define	ME_MUTEXES_EXIT_ALL()	\
	for (int i = 0; i < max_ncpus; i++)	\
		mutex_exit(&me_mutexes[i].kl_lock);
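
/*
 * Illustrative use of the per-CPU locks (a sketch; crypto_mech2id_common()
 * below is a real reader and kcf_create_mech_entry() a real writer):
 *
 *	kcf_lock_withpad_t *mp = &me_mutexes[CPU_SEQID];
 *
 *	mutex_enter(&mp->kl_lock);		// reader: one per-CPU lock
 *	... read the mech_entry ...
 *	mutex_exit(&mp->kl_lock);
 *
 *	ME_MUTEXES_ENTER_ALL();			// writer: all per-CPU locks
 *	... modify the mech_entry ...
 *	ME_MUTEXES_EXIT_ALL();
 */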

/*
 * Per-algorithm internal thresholds for the minimum input size before
 * offloading to a hardware provider.
 * Dispatching a crypto operation to a hardware provider entails paying the
 * cost of an additional context switch. Measurements with a Sun Accelerator
 * 4000 show that jobs of 512 bytes or smaller are better handled in software.
 * There is room for refinement here.
 */
int kcf_md5_threshold = 512;
int kcf_sha1_threshold = 512;
int kcf_des_threshold = 512;
int kcf_des3_threshold = 512;
int kcf_aes_threshold = 512;
int kcf_bf_threshold = 512;
int kcf_rc4_threshold = 512;
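
/*
 * Illustrative use of a threshold (a sketch only; the actual comparison is
 * made by the dispatch code that selects a provider, not in this file):
 *
 *	if (data_size < mech_entry->me_threshold)
 *		... prefer a software provider for this small job ...
 */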

kmutex_t kcf_mech_tabs_lock;
static uint32_t kcf_gen_swprov = 0;

int kcf_mech_hash_size = 256;
mod_hash_t *kcf_mech_hash;	/* mech name to id hash */

static crypto_mech_type_t
kcf_mech_hash_find(char *mechname)
{
	mod_hash_val_t hv;
	crypto_mech_type_t mt;

	mt = CRYPTO_MECH_INVALID;
	if (mod_hash_find(kcf_mech_hash, (mod_hash_key_t)mechname, &hv) == 0) {
		mt = *(crypto_mech_type_t *)hv;
		ASSERT(mt != CRYPTO_MECH_INVALID);
	}

	return (mt);
}

/*
 * kcf_init_mech_tabs()
 *
 * Called by misc/kcf's _init() routine to initialize the tables
 * of mech_entry's.
 */
void
kcf_init_mech_tabs()
{
	int i, max;
	kcf_ops_class_t class;
	kcf_mech_entry_t *me_tab;

	/* Initialize the mutex locks. */

	mutex_init(&kcf_mech_tabs_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Then the pre-defined mechanism entries */

	/* Two digests */
	(void) strncpy(kcf_digest_mechs_tab[0].me_name, SUN_CKM_MD5,
	    CRYPTO_MAX_MECH_NAME);
	kcf_digest_mechs_tab[0].me_threshold = kcf_md5_threshold;

	(void) strncpy(kcf_digest_mechs_tab[1].me_name, SUN_CKM_SHA1,
	    CRYPTO_MAX_MECH_NAME);
	kcf_digest_mechs_tab[1].me_threshold = kcf_sha1_threshold;

	/* The symmetric ciphers in various modes */
	(void) strncpy(kcf_cipher_mechs_tab[0].me_name, SUN_CKM_DES_CBC,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[0].me_threshold = kcf_des_threshold;

	(void) strncpy(kcf_cipher_mechs_tab[1].me_name, SUN_CKM_DES3_CBC,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[1].me_threshold = kcf_des3_threshold;

	(void) strncpy(kcf_cipher_mechs_tab[2].me_name, SUN_CKM_DES_ECB,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[2].me_threshold = kcf_des_threshold;

	(void) strncpy(kcf_cipher_mechs_tab[3].me_name, SUN_CKM_DES3_ECB,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[3].me_threshold = kcf_des3_threshold;

	(void) strncpy(kcf_cipher_mechs_tab[4].me_name, SUN_CKM_BLOWFISH_CBC,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[4].me_threshold = kcf_bf_threshold;

	(void) strncpy(kcf_cipher_mechs_tab[5].me_name, SUN_CKM_BLOWFISH_ECB,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[5].me_threshold = kcf_bf_threshold;

	(void) strncpy(kcf_cipher_mechs_tab[6].me_name, SUN_CKM_AES_CBC,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[6].me_threshold = kcf_aes_threshold;

	(void) strncpy(kcf_cipher_mechs_tab[7].me_name, SUN_CKM_AES_ECB,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[7].me_threshold = kcf_aes_threshold;

	(void) strncpy(kcf_cipher_mechs_tab[8].me_name, SUN_CKM_RC4,
	    CRYPTO_MAX_MECH_NAME);
	kcf_cipher_mechs_tab[8].me_threshold = kcf_rc4_threshold;

	/* 6 MACs */
	(void) strncpy(kcf_mac_mechs_tab[0].me_name, SUN_CKM_MD5_HMAC,
	    CRYPTO_MAX_MECH_NAME);
	kcf_mac_mechs_tab[0].me_threshold = kcf_md5_threshold;

	(void) strncpy(kcf_mac_mechs_tab[1].me_name, SUN_CKM_MD5_HMAC_GENERAL,
	    CRYPTO_MAX_MECH_NAME);
	kcf_mac_mechs_tab[1].me_threshold = kcf_md5_threshold;

	(void) strncpy(kcf_mac_mechs_tab[2].me_name, SUN_CKM_SHA1_HMAC,
	    CRYPTO_MAX_MECH_NAME);
	kcf_mac_mechs_tab[2].me_threshold = kcf_sha1_threshold;

	(void) strncpy(kcf_mac_mechs_tab[3].me_name, SUN_CKM_SHA1_HMAC_GENERAL,
	    CRYPTO_MAX_MECH_NAME);
	kcf_mac_mechs_tab[3].me_threshold = kcf_sha1_threshold;

	(void) strncpy(kcf_mac_mechs_tab[4].me_name, SUN_CKM_AES_GMAC,
	    CRYPTO_MAX_MECH_NAME);
	kcf_mac_mechs_tab[4].me_threshold = kcf_sha1_threshold;

	(void) strncpy(kcf_mac_mechs_tab[5].me_name, SUN_CKM_AES_CMAC,
	    CRYPTO_MAX_MECH_NAME);
	kcf_mac_mechs_tab[5].me_threshold = kcf_sha1_threshold;

	/* 1 random number generation pseudo mechanism */
	(void) strncpy(kcf_misc_mechs_tab[0].me_name, SUN_RANDOM,
	    CRYPTO_MAX_MECH_NAME);

	kcf_mech_hash = mod_hash_create_strhash("kcf mech2id hash",
	    kcf_mech_hash_size, mod_hash_null_valdtor);

	for (class = KCF_FIRST_OPSCLASS; class <= KCF_LAST_OPSCLASS; class++) {
		max = kcf_mech_tabs_tab[class].met_size;
		me_tab = kcf_mech_tabs_tab[class].met_tab;
		for (i = 0; i < max; i++) {
			if (me_tab[i].me_name[0] != 0) {
				me_tab[i].me_mechid = KCF_MECHID(class, i);
				(void) mod_hash_insert(kcf_mech_hash,
				    (mod_hash_key_t)me_tab[i].me_name,
				    (mod_hash_val_t)&(me_tab[i].me_mechid));
			}
		}
	}

	me_mutexes = kmem_zalloc(max_ncpus * sizeof (kcf_lock_withpad_t),
	    KM_SLEEP);
	for (i = 0; i < max_ncpus; i++) {
		mutex_init(&me_mutexes[i].kl_lock, NULL, MUTEX_DEFAULT, NULL);
	}
}

/*
 * kcf_create_mech_entry()
 *
 * Arguments:
 *	. The class of mechanism.
 *	. The name of the new mechanism.
 *
 * Description:
 *	Creates a new mech_entry for a mechanism not yet known to the
 *	framework.
 *	This routine is called by kcf_add_mech_provider, which is
 *	in turn invoked for each mechanism supported by a provider.
 *	The 'class' argument depends on the crypto_func_group_t bitmask
 *	in the registering provider's mech_info struct for this mechanism.
 *	When there is ambiguity in the mapping between the
 *	crypto_func_group_t and a class (dual ops, ...), KCF_MISC_CLASS
 *	should be used.
 *
 * Context:
 *	User context only.
 *
 * Returns:
 *	KCF_INVALID_MECH_CLASS or KCF_INVALID_MECH_NAME if the class or
 *	the mechname is bogus.
 *	KCF_MECH_TAB_FULL when there is no room left in the mech. tabs.
 *	KCF_SUCCESS otherwise.
 */
static int
kcf_create_mech_entry(kcf_ops_class_t class, char *mechname)
{
	crypto_mech_type_t mt;
	kcf_mech_entry_t *me_tab;
	int i = 0, size;

	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS))
		return (KCF_INVALID_MECH_CLASS);

	if ((mechname == NULL) || (mechname[0] == 0))
		return (KCF_INVALID_MECH_NAME);
	/*
	 * First check if the mechanism is already in one of the tables.
	 * The mech_entry could be in another class.
	 */
	mutex_enter(&kcf_mech_tabs_lock);
	mt = kcf_mech_hash_find(mechname);
	if (mt != CRYPTO_MECH_INVALID) {
		/* Nothing to do, regardless of the suggested class. */
		mutex_exit(&kcf_mech_tabs_lock);
		return (KCF_SUCCESS);
	}
	/* Now take the next unused mech entry in the class's tab */
	me_tab = kcf_mech_tabs_tab[class].met_tab;
	size = kcf_mech_tabs_tab[class].met_size;

	while (i < size) {
		ME_MUTEXES_ENTER_ALL();
		if (me_tab[i].me_name[0] == 0) {
			/* Found an empty spot */
			(void) strncpy(me_tab[i].me_name, mechname,
			    CRYPTO_MAX_MECH_NAME);
			me_tab[i].me_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
			me_tab[i].me_mechid = KCF_MECHID(class, i);
			/*
			 * No a priori information about the new mechanism, so
			 * the threshold is set to zero.
			 */
			me_tab[i].me_threshold = 0;

			ME_MUTEXES_EXIT_ALL();
			/* Add the new mechanism to the hash table */
			(void) mod_hash_insert(kcf_mech_hash,
			    (mod_hash_key_t)me_tab[i].me_name,
			    (mod_hash_val_t)&(me_tab[i].me_mechid));
			break;
		}
		ME_MUTEXES_EXIT_ALL();
		i++;
	}

	mutex_exit(&kcf_mech_tabs_lock);

	if (i == size) {
		return (KCF_MECH_TAB_FULL);
	}

	return (KCF_SUCCESS);
}

/*
 * kcf_add_mech_provider()
 *
 * Arguments:
 *	. An index into the provider mechanism array
 *	. A pointer to the provider descriptor
 *	. Storage for the kcf_prov_mech_desc_t the entry was added at.
 *
 * Description:
 *	Adds a new provider of a mechanism to the mechanism's mech_entry
 *	chain.
 *
 * Context:
 *	User context only.
 *
 * Returns:
 *	KCF_SUCCESS on success.
 *	KCF_MECH_TAB_FULL otherwise.
 */
int
kcf_add_mech_provider(short mech_indx,
    kcf_provider_desc_t *prov_desc, kcf_prov_mech_desc_t **pmdpp)
{
	int error;
	kcf_mech_entry_t *mech_entry;
	crypto_mech_info_t *mech_info;
	crypto_mech_type_t kcf_mech_type, mt;
	kcf_prov_mech_desc_t *prov_mech, *prov_mech2;
	crypto_func_group_t simple_fg_mask, dual_fg_mask;
	crypto_mech_info_t *dmi;
	crypto_mech_info_list_t *mil, *mil2;
	kcf_mech_entry_t *me;
	int i;

	ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	mech_info = &prov_desc->pd_mechanisms[mech_indx];
	/*
	 * Do not use the provider for the mechanism if
	 * policy does not allow it.
	 */
	if (is_mech_disabled(prov_desc, mech_info->cm_mech_name)) {
		*pmdpp = NULL;
		return (KCF_SUCCESS);
	}

	/*
	 * A mechanism belongs to exactly one mechanism table.
	 * Find the class corresponding to the function group flag of
	 * the mechanism.
	 */
	kcf_mech_type = kcf_mech_hash_find(mech_info->cm_mech_name);
	if (kcf_mech_type == CRYPTO_MECH_INVALID) {
		crypto_func_group_t fg = mech_info->cm_func_group_mask;
		kcf_ops_class_t class;

		if (fg & CRYPTO_FG_DIGEST || fg & CRYPTO_FG_DIGEST_ATOMIC)
			class = KCF_DIGEST_CLASS;
		else if (fg & CRYPTO_FG_ENCRYPT || fg & CRYPTO_FG_DECRYPT ||
		    fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
		    fg & CRYPTO_FG_DECRYPT_ATOMIC)
			class = KCF_CIPHER_CLASS;
		else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)
			class = KCF_MAC_CLASS;
		else if (fg & CRYPTO_FG_SIGN || fg & CRYPTO_FG_VERIFY ||
		    fg & CRYPTO_FG_SIGN_ATOMIC ||
		    fg & CRYPTO_FG_VERIFY_ATOMIC ||
		    fg & CRYPTO_FG_SIGN_RECOVER ||
		    fg & CRYPTO_FG_VERIFY_RECOVER)
			class = KCF_SIGN_CLASS;
		else if (fg & CRYPTO_FG_GENERATE ||
		    fg & CRYPTO_FG_GENERATE_KEY_PAIR ||
		    fg & CRYPTO_FG_WRAP || fg & CRYPTO_FG_UNWRAP ||
		    fg & CRYPTO_FG_DERIVE)
			class = KCF_KEYOPS_CLASS;
		else
			class = KCF_MISC_CLASS;

		/*
		 * Attempt to create a new mech_entry for the specified
		 * mechanism. kcf_create_mech_entry() can handle the case
		 * where such an entry already exists.
		 */
		if ((error = kcf_create_mech_entry(class,
		    mech_info->cm_mech_name)) != KCF_SUCCESS) {
			return (error);
		}
		/* get the KCF mech type that was assigned to the mechanism */
		kcf_mech_type = kcf_mech_hash_find(mech_info->cm_mech_name);
		ASSERT(kcf_mech_type != CRYPTO_MECH_INVALID);
	}

	error = kcf_get_mech_entry(kcf_mech_type, &mech_entry);
	ASSERT(error == KCF_SUCCESS);

	/* allocate and initialize new kcf_prov_mech_desc */
	prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
	bcopy(mech_info, &prov_mech->pm_mech_info, sizeof (crypto_mech_info_t));
	prov_mech->pm_prov_desc = prov_desc;
	prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
	    [KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;

	KCF_PROV_REFHOLD(prov_desc);

	dual_fg_mask = mech_info->cm_func_group_mask & CRYPTO_FG_DUAL_MASK;

	if (dual_fg_mask == ((crypto_func_group_t)0))
		goto add_entry;

	simple_fg_mask = (mech_info->cm_func_group_mask &
	    CRYPTO_FG_SIMPLEOP_MASK) | CRYPTO_FG_RANDOM;

	for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
		dmi = &prov_desc->pd_mechanisms[i];

		/* skip self */
		if (dmi->cm_mech_number == mech_info->cm_mech_number)
			continue;

		/* skip if policy doesn't allow mechanism */
		if (is_mech_disabled(prov_desc, dmi->cm_mech_name))
			continue;

		/* skip if not a dual operation mechanism */
		if (!(dmi->cm_func_group_mask & dual_fg_mask) ||
		    (dmi->cm_func_group_mask & simple_fg_mask))
			continue;

		mt = kcf_mech_hash_find(dmi->cm_mech_name);
		if (mt == CRYPTO_MECH_INVALID)
			continue;

		if (kcf_get_mech_entry(mt, &me) != KCF_SUCCESS)
			continue;

		mil = kmem_zalloc(sizeof (*mil), KM_SLEEP);
		mil2 = kmem_zalloc(sizeof (*mil2), KM_SLEEP);

		/*
		 * Ignore hard-coded entries in the mech table
		 * if the provider hasn't registered.
		 */
		ME_MUTEXES_ENTER_ALL();
		if (me->me_hw_prov_chain == NULL && me->me_sw_prov == NULL) {
			ME_MUTEXES_EXIT_ALL();
			kmem_free(mil, sizeof (*mil));
			kmem_free(mil2, sizeof (*mil2));
			continue;
		}

		/*
		 * Add other dual mechanisms that have registered
		 * with the framework to this mechanism's
		 * cross-reference list.
		 */
		mil->ml_mech_info = *dmi; /* struct assignment */
		mil->ml_kcf_mechid = mt;

		/* add to head of list */
		mil->ml_next = prov_mech->pm_mi_list;
		prov_mech->pm_mi_list = mil;

		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
			prov_mech2 = me->me_hw_prov_chain;
		else
			prov_mech2 = me->me_sw_prov;

		if (prov_mech2 == NULL) {
			kmem_free(mil2, sizeof (*mil2));
			ME_MUTEXES_EXIT_ALL();
			continue;
		}

		/*
		 * Update all other cross-reference lists by
		 * adding this new mechanism.
		 */
		while (prov_mech2 != NULL) {
			if (prov_mech2->pm_prov_desc == prov_desc) {
				/* struct assignment */
				mil2->ml_mech_info = *mech_info;
				mil2->ml_kcf_mechid = kcf_mech_type;

				/* add to head of list */
				mil2->ml_next = prov_mech2->pm_mi_list;
				prov_mech2->pm_mi_list = mil2;
				break;
			}
			prov_mech2 = prov_mech2->pm_next;
		}
		if (prov_mech2 == NULL)
			kmem_free(mil2, sizeof (*mil2));

		ME_MUTEXES_EXIT_ALL();
	}

add_entry:
	/*
	 * Add the new kcf_prov_mech_desc at the front of the HW providers
	 * chain, or make it the SW provider for this mechanism.
	 */
	switch (prov_desc->pd_prov_type) {

	case CRYPTO_HW_PROVIDER:
		ME_MUTEXES_ENTER_ALL();
		prov_mech->pm_me = mech_entry;
		prov_mech->pm_next = mech_entry->me_hw_prov_chain;
		mech_entry->me_hw_prov_chain = prov_mech;
		mech_entry->me_num_hwprov++;
		ME_MUTEXES_EXIT_ALL();
		break;

	case CRYPTO_SW_PROVIDER:
		ME_MUTEXES_ENTER_ALL();
		if (mech_entry->me_sw_prov != NULL) {
			/*
			 * There is already a SW provider for this mechanism.
			 * Since we allow only one SW provider per mechanism,
			 * report this condition.
			 */
			cmn_err(CE_WARN, "The cryptographic software provider "
			    "\"%s\" will not be used for %s. The provider "
			    "\"%s\" will be used for this mechanism "
			    "instead.", prov_desc->pd_description,
			    mech_info->cm_mech_name,
			    mech_entry->me_sw_prov->pm_prov_desc->
			    pd_description);
			KCF_PROV_REFRELE(prov_desc);
			kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
			prov_mech = NULL;
		} else {
			/*
			 * Set the provider as the software provider for
			 * this mechanism.
			 */
			mech_entry->me_sw_prov = prov_mech;

			/* We'll wrap around after 4 billion registrations! */
			mech_entry->me_gen_swprov = kcf_gen_swprov++;
		}
		ME_MUTEXES_EXIT_ALL();
		break;
	}

	*pmdpp = prov_mech;

	return (KCF_SUCCESS);
}

/*
 * kcf_remove_mech_provider()
 *
 * Arguments:
 *	. mech_name: the name of the mechanism.
 *	. prov_desc: the provider descriptor.
 *
 * Description:
 *	Removes a provider from the chain of provider descriptors.
 *	The provider is made unavailable to kernel consumers for the specified
 *	mechanism.
 *
 * Context:
 *	User context only.
 */
void
kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
{
	crypto_mech_type_t mech_type;
	kcf_prov_mech_desc_t *prov_mech, *prov_chain;
	kcf_prov_mech_desc_t **prev_entry_next;
	kcf_mech_entry_t *mech_entry;
	crypto_mech_info_list_t *mil, *mil2, *next, **prev_next;

	ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);

	/* get the KCF mech type that was assigned to the mechanism */
	if ((mech_type = kcf_mech_hash_find(mech_name)) ==
	    CRYPTO_MECH_INVALID) {
		/*
		 * Provider was not allowed for this mech due to policy or
		 * configuration.
		 */
		return;
	}

	/* get a ptr to the mech_entry that was created */
	if (kcf_get_mech_entry(mech_type, &mech_entry) != KCF_SUCCESS) {
		/*
		 * Provider was not allowed for this mech due to policy or
		 * configuration.
		 */
		return;
	}

	ME_MUTEXES_ENTER_ALL();

	switch (prov_desc->pd_prov_type) {

	case CRYPTO_HW_PROVIDER:
		/* find the provider in the mech_entry chain */
		prev_entry_next = &mech_entry->me_hw_prov_chain;
		prov_mech = mech_entry->me_hw_prov_chain;
		while (prov_mech != NULL &&
		    prov_mech->pm_prov_desc != prov_desc) {
			prev_entry_next = &prov_mech->pm_next;
			prov_mech = prov_mech->pm_next;
		}

		if (prov_mech == NULL) {
			/* entry not found, simply return */
			ME_MUTEXES_EXIT_ALL();
			return;
		}

		/* remove provider entry from mech_entry chain */
		*prev_entry_next = prov_mech->pm_next;
		ASSERT(mech_entry->me_num_hwprov > 0);
		mech_entry->me_num_hwprov--;
		break;

	case CRYPTO_SW_PROVIDER:
		if (mech_entry->me_sw_prov == NULL ||
		    mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
			/* not the software provider for this mechanism */
			ME_MUTEXES_EXIT_ALL();
			return;
		}
		prov_mech = mech_entry->me_sw_prov;
		mech_entry->me_sw_prov = NULL;
		break;
	}

	ME_MUTEXES_EXIT_ALL();

	/* Free the dual ops cross-reference lists */
	mil = prov_mech->pm_mi_list;
	while (mil != NULL) {
		next = mil->ml_next;
		if (kcf_get_mech_entry(mil->ml_kcf_mechid,
		    &mech_entry) != KCF_SUCCESS) {
			mil = next;
			continue;
		}

		ME_MUTEXES_ENTER_ALL();
		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
			prov_chain = mech_entry->me_hw_prov_chain;
		else
			prov_chain = mech_entry->me_sw_prov;

		while (prov_chain != NULL) {
			if (prov_chain->pm_prov_desc == prov_desc) {
				prev_next = &prov_chain->pm_mi_list;
				mil2 = prov_chain->pm_mi_list;
				while (mil2 != NULL &&
				    mil2->ml_kcf_mechid != mech_type) {
					prev_next = &mil2->ml_next;
					mil2 = mil2->ml_next;
				}
				if (mil2 != NULL) {
					*prev_next = mil2->ml_next;
					kmem_free(mil2, sizeof (*mil2));
				}
				break;
			}
			prov_chain = prov_chain->pm_next;
		}

		ME_MUTEXES_EXIT_ALL();
		kmem_free(mil, sizeof (crypto_mech_info_list_t));
		mil = next;
	}

	/* free entry */
	KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
	kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
}

/*
 * kcf_get_mech_entry()
 *
 * Arguments:
 *	. The framework mechanism type
 *	. Storage for the mechanism entry
 *
 * Description:
 *	Retrieves the mechanism entry for the mech.
 *
 * Context:
 *	User and interrupt contexts.
 *
 * Returns:
 *	KCF_INVALID_MECH_NUMBER if the mechanism type is invalid.
 *	KCF_SUCCESS otherwise.
 */
int
kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep)
{
	kcf_ops_class_t class;
	int index;
	kcf_mech_entry_tab_t *me_tab;

	ASSERT(mep != NULL);

	class = KCF_MECH2CLASS(mech_type);

	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
		/* the caller won't need to know it's an invalid class */
		return (KCF_INVALID_MECH_NUMBER);
	}

	me_tab = &kcf_mech_tabs_tab[class];
	index = KCF_MECH2INDEX(mech_type);

	if ((index < 0) || (index >= me_tab->met_size)) {
		return (KCF_INVALID_MECH_NUMBER);
	}

	*mep = &((me_tab->met_tab)[index]);

	return (KCF_SUCCESS);
}

/*
 * Returns TRUE if the provider is usable and the MOD_NOAUTOUNLOAD flag
 * is set in the modctl structure.
 */
static boolean_t
auto_unload_flag_set(kcf_prov_mech_desc_t *pm)
{
	kcf_provider_desc_t *pd;
	struct modctl *mp;
	boolean_t ret = B_FALSE;

	if (pm != NULL) {
		pd = pm->pm_prov_desc;
		KCF_PROV_REFHOLD(pd);

		if (KCF_IS_PROV_USABLE(pd)) {
			mp = pd->pd_mctlp;
			if (mp->mod_loadflags & MOD_NOAUTOUNLOAD) {
				ret = B_TRUE;
			}
		}
		KCF_PROV_REFRELE(pd);
	}

	return (ret);
}

/*
 * Look up the hash table for an entry that matches the mechname.
 * If there are no hardware or software providers for the mechanism,
 * but there is an unloaded software provider, this routine will attempt
 * to load it.
 *
 * If the MOD_NOAUTOUNLOAD flag is not set, a software provider is
 * in constant danger of being unloaded. For consumers that call
 * crypto_mech2id() only once, the provider will not be reloaded
 * if it becomes unloaded. If a provider gets loaded elsewhere
 * without the MOD_NOAUTOUNLOAD flag being set, we set it now.
 */
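
/*
 * Illustrative consumer usage (a sketch, not part of this file's logic;
 * crypto_mech2id() resolves a mechanism name by calling this routine with
 * load_module set to B_TRUE):
 *
 *	crypto_mech_type_t mt = crypto_mech2id(SUN_CKM_AES_CBC);
 *	if (mt == CRYPTO_MECH_INVALID)
 *		return (CRYPTO_MECHANISM_INVALID);
 */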
crypto_mech_type_t
crypto_mech2id_common(char *mechname, boolean_t load_module)
{
	crypto_mech_type_t mt;
	kcf_mech_entry_t *me;
	int i;
	kcf_ops_class_t class;
	boolean_t second_time = B_FALSE;
	boolean_t try_to_load_software_provider = B_FALSE;
	kcf_lock_withpad_t *mp;

try_again:
	mt = kcf_mech_hash_find(mechname);
	if (!load_module || second_time == B_TRUE || servicing_interrupt())
		return (mt);

	if (mt != CRYPTO_MECH_INVALID) {
		class = KCF_MECH2CLASS(mt);
		i = KCF_MECH2INDEX(mt);
		me = &(kcf_mech_tabs_tab[class].met_tab[i]);
		mp = &me_mutexes[CPU_SEQID];
		mutex_enter(&mp->kl_lock);

		if (load_module && !auto_unload_flag_set(me->me_sw_prov)) {
			try_to_load_software_provider = B_TRUE;
		}
		mutex_exit(&mp->kl_lock);
	}

	if (mt == CRYPTO_MECH_INVALID || try_to_load_software_provider) {
		struct modctl *mcp;
		boolean_t load_again = B_FALSE;
		char *module_name;
		int module_name_size;

		/* try to find a software provider for the mechanism */
		if (get_sw_provider_for_mech(mechname, &module_name)
		    != CRYPTO_SUCCESS) {
			/* mt may already be set for a hw provider */
			return (mt);
		}

		module_name_size = strlen(module_name) + 1;
		if (modload("crypto", module_name) == -1 ||
		    (mcp = mod_hold_by_name(module_name)) == NULL) {
			kmem_free(module_name, module_name_size);
			/* mt may already be set for a hw provider */
			return (mt);
		}

		mcp->mod_loadflags |= MOD_NOAUTOUNLOAD;

		/* memory pressure may have unloaded the module */
		if (!mcp->mod_installed)
			load_again = B_TRUE;
		mod_release_mod(mcp);

		if (load_again)
			(void) modload("crypto", module_name);

		kmem_free(module_name, module_name_size);

		/* mt may already be set for a hw provider */
		if (mt != CRYPTO_MECH_INVALID)
			return (mt);

		/*
		 * Try again. Should find a software provider in the
		 * table this time around.
		 */
		second_time = B_TRUE;
		goto try_again;
	}

	return (mt);
}