/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_CRYPTO_SCHED_IMPL_H
#define	_SYS_CRYPTO_SCHED_IMPL_H

/*
 * Scheduler internal structures.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/door.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/ops_impl.h>

typedef void (kcf_func_t)(void *, int);

typedef enum kcf_req_status {
	REQ_ALLOCATED = 1,
	REQ_WAITING,		/* At the framework level */
	REQ_INPROGRESS,		/* At the provider level */
	REQ_DONE,
	REQ_CANCELED
} kcf_req_status_t;

typedef enum kcf_call_type {
	CRYPTO_SYNCH = 1,
	CRYPTO_ASYNCH
} kcf_call_type_t;

#define	CHECK_RESTRICT(crq)	((crq) != NULL && \
	((crq)->cr_flag & CRYPTO_RESTRICTED))

#define	CHECK_RESTRICT_FALSE	B_FALSE

#define	CHECK_FASTPATH(crq, pd)	(((crq) == NULL || \
	!((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE)) && \
	(pd)->pd_prov_type == CRYPTO_SW_PROVIDER)

#define	KCF_KMFLAG(crq)	(((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)

/*
 * The framework keeps an internal handle to use in the adaptive
 * asynchronous case. This is the case when a client has the
 * CRYPTO_ALWAYS_QUEUE bit clear and a software provider is used for
 * the request. The request is completed in the context of the calling
 * thread and kernel memory must be allocated with KM_NOSLEEP.
 *
 * The framework passes a pointer to the handle in the
 * crypto_req_handle_t argument when it calls the SPI of the software
 * provider. The macros KCF_RHNDL() and KCF_SWFP_RHNDL() are used to
 * do this.
 *
 * When a provider asks the framework for the kmflag value via
 * crypto_kmflag(9F), we use the REQHNDL2_KMFLAG() macro.
 */
extern ulong_t kcf_swprov_hndl;
#define	KCF_RHNDL(kmflag) (((kmflag) == KM_SLEEP) ? NULL : &kcf_swprov_hndl)
#define	KCF_SWFP_RHNDL(crq) (((crq) == NULL) ? NULL : &kcf_swprov_hndl)
#define	REQHNDL2_KMFLAG(rhndl) \
	(((rhndl) == &kcf_swprov_hndl) ? KM_NOSLEEP : KM_SLEEP)
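
/*
 * Illustrative sketch of the adaptive asynchronous path (not a
 * verbatim excerpt; it assumes the KCF_PROV_ENCRYPT() dispatch macro
 * from impl.h and made-up variable names). The framework hands the
 * software provider the internal handle, and the provider maps it
 * back to a kmem flag via crypto_kmflag(9F):
 *
 *	error = KCF_PROV_ENCRYPT(pd, ctx, pt, ct, KCF_SWFP_RHNDL(crq));
 *
 *	(inside the provider entry point; this never sleeps when the
 *	request came through the adaptive asynchronous path)
 *	buf = kmem_alloc(len, crypto_kmflag(req));
 */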
/* Internal call_req flags. They start after the public ones in api.h */

#define	CRYPTO_SETDUAL	0x00001000	/* Set the 'cont' boolean before */
					/* submitting the request */
#define	KCF_ISDUALREQ(crq)	\
	(((crq) == NULL) ? B_FALSE : ((crq)->cr_flag & CRYPTO_SETDUAL))

typedef struct kcf_prov_tried {
	kcf_provider_desc_t	*pt_pd;
	struct kcf_prov_tried	*pt_next;
} kcf_prov_tried_t;

/* Must be different from KM_SLEEP and KM_NOSLEEP */
#define	KCF_HOLD_PROV	0x1000

#define	IS_FG_SUPPORTED(mdesc, fg) \
	(((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)

#define	IS_PROVIDER_TRIED(pd, tlist) \
	((tlist) != NULL && is_in_triedlist(pd, tlist))

#define	IS_RECOVERABLE(error) \
	((error) == CRYPTO_BUFFER_TOO_BIG || \
	(error) == CRYPTO_BUSY || \
	(error) == CRYPTO_DEVICE_ERROR || \
	(error) == CRYPTO_DEVICE_MEMORY || \
	(error) == CRYPTO_KEY_SIZE_RANGE || \
	(error) == CRYPTO_NO_PERMISSION)

#define	KCF_ATOMIC_INCR(x)	atomic_add_32(&(x), 1)
#define	KCF_ATOMIC_DECR(x)	atomic_add_32(&(x), -1)

/*
 * Node structure for synchronous requests.
 */
typedef struct kcf_sreq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t		sn_type;

	/*
	 * sn_cv and sn_lock are used to wait for the
	 * operation to complete. sn_lock also protects
	 * the sn_state field.
	 */
	kcondvar_t		sn_cv;
	kmutex_t		sn_lock;
	kcf_req_status_t	sn_state;

	/*
	 * Return value from the operation. This will be
	 * one of the CRYPTO_* errors defined in common.h.
	 */
	int			sn_rv;

	/*
	 * Parameters to call the SPI with. This can be
	 * a pointer as we know the caller context/stack stays.
	 */
	struct kcf_req_params	*sn_params;

	/* Internal context for this request */
	struct kcf_context	*sn_context;

	/* Provider handling this request */
	kcf_provider_desc_t	*sn_provider;

	kcf_prov_cpu_t		*sn_mp;
} kcf_sreq_node_t;
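
/*
 * Sketch of the expected waiter pattern for a synchronous request
 * (assumed from the field comments above; not a verbatim excerpt of
 * the scheduler, and "sreq" is a made-up name):
 *
 *	mutex_enter(&sreq->sn_lock);
 *	while (sreq->sn_state < REQ_DONE)
 *		cv_wait(&sreq->sn_cv, &sreq->sn_lock);
 *	error = sreq->sn_rv;
 *	mutex_exit(&sreq->sn_lock);
 *
 * kcf_sop_done() is the completion side that sets sn_state and
 * signals sn_cv.
 */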
/*
 * Node structure for asynchronous requests. A node can be on
 * a chain of requests hanging off the internal context
 * structure and can be in the global software provider queue.
 */
typedef struct kcf_areq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t		an_type;

	/* an_lock protects the field an_state */
	kmutex_t		an_lock;
	kcf_req_status_t	an_state;
	crypto_call_req_t	an_reqarg;

	/*
	 * Parameters to call the SPI with. We need to
	 * save the params since the caller stack can go away.
	 */
	struct kcf_req_params	an_params;

	/*
	 * The next two fields should be NULL for operations that
	 * don't need a context.
	 */
	/* Internal context for this request */
	struct kcf_context	*an_context;

	/* next in chain of requests for context */
	struct kcf_areq_node	*an_ctxchain_next;

	kcondvar_t		an_turn_cv;
	boolean_t		an_is_my_turn;
	boolean_t		an_isdual;	/* for internal reuse */

	/*
	 * Next and previous nodes in the global software
	 * queue. These fields are NULL for a hardware
	 * provider since we use a taskq there.
	 */
	struct kcf_areq_node	*an_next;
	struct kcf_areq_node	*an_prev;

	/* Provider handling this request */
	kcf_provider_desc_t	*an_provider;
	kcf_prov_cpu_t		*an_mp;
	kcf_prov_tried_t	*an_tried_plist;

	struct kcf_areq_node	*an_idnext;	/* Next in ID hash */
	struct kcf_areq_node	*an_idprev;	/* Prev in ID hash */
	kcondvar_t		an_done;	/* Signal request completion */
	uint_t			an_refcnt;
} kcf_areq_node_t;

#define	KCF_AREQ_REFHOLD(areq) {		\
	atomic_add_32(&(areq)->an_refcnt, 1);	\
	ASSERT((areq)->an_refcnt != 0);		\
}

#define	KCF_AREQ_REFRELE(areq) {				\
	ASSERT((areq)->an_refcnt != 0);				\
	membar_exit();						\
	if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0)	\
		kcf_free_req(areq);				\
}

#define	GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))

#define	NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
	(areq)->an_reqarg.cr_callback_arg, err);

/* For internally generated call requests for dual operations */
typedef struct kcf_call_req {
	crypto_call_req_t	kr_callreq;	/* external client call req */
	kcf_req_params_t	kr_params;	/* Params saved for next call */
	kcf_areq_node_t		*kr_areq;	/* Use this areq */
	off_t			kr_saveoffset;
	size_t			kr_savelen;
} kcf_dual_req_t;

/*
 * The following are somewhat similar to macros in callo.h, which
 * implement callout tables.
 *
 * The lower four bits of the ID are used to encode the table ID to
 * index into. The REQID_COUNTER_HIGH bit is used to avoid any check
 * for wrap around when generating an ID. We assume that there won't
 * be a request which takes more time than 2^(8 * sizeof (long) - 5)
 * other requests submitted after it. This ensures there won't be any
 * ID collision.
 */
#define	REQID_COUNTER_HIGH	(1UL << (8 * sizeof (long) - 1))
#define	REQID_COUNTER_SHIFT	4
#define	REQID_COUNTER_LOW	(1 << REQID_COUNTER_SHIFT)
#define	REQID_TABLES		16
#define	REQID_TABLE_MASK	(REQID_TABLES - 1)

#define	REQID_BUCKETS		512
#define	REQID_BUCKET_MASK	(REQID_BUCKETS - 1)
#define	REQID_HASH(id)	(((id) >> REQID_COUNTER_SHIFT) & REQID_BUCKET_MASK)

#define	GET_REQID(areq) (areq)->an_reqarg.cr_reqid
#define	SET_REQID(areq, val)	GET_REQID(areq) = (val)
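
/*
 * Sketch of how a request ID decomposes, following the macros above
 * (illustrative only; the variable names are made up):
 *
 *	tid = id & REQID_TABLE_MASK;	(selects one of REQID_TABLES tables)
 *	bucket = REQID_HASH(id);	(index into that table's rt_idhash[])
 *
 * The counter occupies the bits between REQID_COUNTER_SHIFT and the
 * REQID_COUNTER_HIGH bit, which is always set.
 */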
/*
 * Hash table for async requests.
 */
typedef struct kcf_reqid_table {
	kmutex_t	rt_lock;
	crypto_req_id_t	rt_curid;
	kcf_areq_node_t	*rt_idhash[REQID_BUCKETS];
} kcf_reqid_table_t;

/*
 * Global software provider queue structure. Requests that are to be
 * handled by a SW provider and have the ALWAYS_QUEUE flag set are
 * queued here.
 */
typedef struct kcf_global_swq {
	/*
	 * gs_cv and gs_lock are used to wait for new requests.
	 * gs_lock protects the changes to the queue.
	 */
	kcondvar_t	gs_cv;
	kmutex_t	gs_lock;
	uint_t		gs_njobs;
	uint_t		gs_maxjobs;
	kcf_areq_node_t	*gs_first;
	kcf_areq_node_t	*gs_last;
} kcf_global_swq_t;


/*
 * Internal representation of a canonical context. We embed the
 * crypto_ctx_t structure so that only one memory allocation is needed.
 * The SPI's ((crypto_ctx_t *)ctx)->cc_framework_private maps to this
 * structure.
 */
typedef struct kcf_context {
	crypto_ctx_t		kc_glbl_ctx;
	uint_t			kc_refcnt;
	kmutex_t		kc_in_use_lock;
	/*
	 * kc_req_chain_first and kc_req_chain_last are used to chain
	 * multiple async requests using the same context. They should
	 * be NULL for sync requests.
	 */
	kcf_areq_node_t		*kc_req_chain_first;
	kcf_areq_node_t		*kc_req_chain_last;
	kcf_provider_desc_t	*kc_prov_desc;		/* Prov. descriptor */
	kcf_provider_desc_t	*kc_sw_prov_desc;	/* Prov. descriptor */
	kcf_mech_entry_t	*kc_mech;
	struct kcf_context	*kc_secondctx;	/* for dual contexts */
} kcf_context_t;

/*
 * Bump up the reference count on the framework private context. A
 * global context or a request that references this structure should
 * do a hold.
 */
#define	KCF_CONTEXT_REFHOLD(ictx) {		\
	atomic_add_32(&(ictx)->kc_refcnt, 1);	\
	ASSERT((ictx)->kc_refcnt != 0);		\
}

/*
 * Decrement the reference count on the framework private context.
 * When the last reference is released, the framework private
 * context structure is freed along with the global context.
 */
#define	KCF_CONTEXT_REFRELE(ictx) {				\
	ASSERT((ictx)->kc_refcnt != 0);				\
	membar_exit();						\
	if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0)	\
		kcf_free_context(ictx);				\
}

/*
 * Check if we can release the context now. In case of CRYPTO_QUEUED
 * we do not release it, as we can do so only after the provider has
 * notified us. In case of CRYPTO_BUSY, the client can retry the
 * request using the context, so we do not release it either.
 *
 * This macro should be called only from the final routine in
 * an init/update/final sequence. We do not release the context in case
 * of update operations. We require the consumer to free it
 * explicitly, in case it wants to abandon the operation. This is done
 * as there may be mechanisms in ECB mode that can continue even if
 * an operation on a block fails.
 */
#define	KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) {		\
	if (KCF_CONTEXT_DONE(rv))			\
		KCF_CONTEXT_REFRELE(kcf_ctx);		\
}

/*
 * This macro determines whether we're done with a context.
 */
#define	KCF_CONTEXT_DONE(rv)	\
	((rv) != CRYPTO_QUEUED && (rv) != CRYPTO_BUSY &&	\
	(rv) != CRYPTO_BUFFER_TOO_SMALL)
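
/*
 * Sketch of the intended use in a *_final() path (assumed pattern,
 * not a verbatim excerpt; "error" is the value returned for the
 * submitted request):
 *
 *	error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
 *	KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
 *
 * CRYPTO_QUEUED and CRYPTO_BUSY leave the context held, per the
 * comments above.
 */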
/*
 * A crypto_ctx_template_t is internally a pointer to this struct
 */
typedef struct kcf_ctx_template {
	crypto_kcf_provider_handle_t	ct_prov_handle;	/* provider handle */
	uint_t				ct_generation;	/* generation # */
	size_t				ct_size;	/* for freeing */
	crypto_spi_ctx_template_t	ct_prov_tmpl;	/* context template */
							/* from the SW prov */
} kcf_ctx_template_t;

/*
 * Structure for the pool of threads working on the global software queue.
 */
typedef struct kcf_pool {
	uint32_t	kp_threads;		/* Number of threads in pool */
	uint32_t	kp_idlethreads;		/* Idle threads in pool */
	uint32_t	kp_blockedthreads;	/* Blocked threads in pool */

	/*
	 * cv & lock to monitor the condition when no threads
	 * are around. In this case the failover thread kicks in.
	 */
	kcondvar_t	kp_nothr_cv;
	kmutex_t	kp_thread_lock;

	/* Userspace thread creator variables. */
	boolean_t	kp_signal_create_thread; /* Create requested flag */
	int		kp_nthrs;		/* # of threads to create */
	boolean_t	kp_user_waiting;	/* Thread waiting for work */

	/*
	 * cv & lock for the condition where more threads need to be
	 * created. kp_user_lock also protects the three fields above.
	 */
	kcondvar_t	kp_user_cv;		/* Creator cond. variable */
	kmutex_t	kp_user_lock;		/* Creator lock */
} kcf_pool_t;


/*
 * State of a crypto bufcall element.
 */
typedef enum cbuf_state {
	CBUF_FREE = 1,
	CBUF_WAITING,
	CBUF_RUNNING
} cbuf_state_t;

/*
 * Structure of a crypto bufcall element.
 */
typedef struct kcf_cbuf_elem {
	/*
	 * lock and cv to wait for CBUF_RUNNING to be done.
	 * kc_lock also protects kc_state.
	 */
	kmutex_t		kc_lock;
	kcondvar_t		kc_cv;
	cbuf_state_t		kc_state;

	struct kcf_cbuf_elem	*kc_next;
	struct kcf_cbuf_elem	*kc_prev;

	void			(*kc_func)(void *arg);
	void			*kc_arg;
} kcf_cbuf_elem_t;

/*
 * State of a notify element.
 */
typedef enum ntfy_elem_state {
	NTFY_WAITING = 1,
	NTFY_RUNNING
} ntfy_elem_state_t;

/*
 * Structure of a notify list element.
 */
typedef struct kcf_ntfy_elem {
	/*
	 * lock and cv to wait for NTFY_RUNNING to be done.
	 * kn_lock also protects kn_state.
	 */
	kmutex_t		kn_lock;
	kcondvar_t		kn_cv;
	ntfy_elem_state_t	kn_state;

	struct kcf_ntfy_elem	*kn_next;
	struct kcf_ntfy_elem	*kn_prev;

	crypto_notify_callback_t kn_func;
	uint32_t		kn_event_mask;
} kcf_ntfy_elem_t;


/*
 * The following values are based on the assumption that it would
 * take around eight cpus to load a hardware provider (this is true
 * for at least one product) and that a kernel client may come from
 * different low-priority interrupt levels. We will have
 * CRYPTO_TASKQ_MIN number of cached taskq entries. The
 * CRYPTO_TASKQ_MAX number is based on a throughput of 1GB/s using
 * 512-byte buffers. These are just reasonable estimates and might
 * need to change in the future.
 */
#define	CRYPTO_TASKQ_THREADS	8
#define	CRYPTO_TASKQ_MIN	64
#define	CRYPTO_TASKQ_MAX	(2 * 1024 * 1024)

extern int crypto_taskq_threads;
extern int crypto_taskq_minalloc;
extern int crypto_taskq_maxalloc;
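
/*
 * The three variables above are tunables seeded from the defaults.
 * For example, an administrator could raise the taskq cache ceiling
 * in /etc/system (illustrative sketch; it assumes the variables live
 * in the kcf module):
 *
 *	set kcf:crypto_taskq_maxalloc = 4194304
 */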
extern kcf_global_swq_t *gswq;
extern int kcf_maxthreads;
extern int kcf_minthreads;

/* Door handle for talking to kcfd */
extern door_handle_t kcf_dh;
extern kmutex_t kcf_dh_lock;

/*
 * All pending crypto bufcalls are put on a list. cbuf_list_lock
 * protects changes to this list.
 */
extern kmutex_t cbuf_list_lock;
extern kcondvar_t cbuf_list_cv;

/*
 * All event subscribers are put on a list. ntfy_list_lock
 * protects changes to this list.
 */
extern kmutex_t ntfy_list_lock;
extern kcondvar_t ntfy_list_cv;

boolean_t kcf_get_next_logical_provider_member(kcf_provider_desc_t *,
    kcf_provider_desc_t *, kcf_provider_desc_t **);
extern int kcf_get_hardware_provider(crypto_mech_type_t, crypto_key_t *,
    crypto_mech_type_t, crypto_key_t *,
    boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **,
    crypto_func_group_t);
extern int kcf_get_hardware_provider_nomech(offset_t, offset_t,
    boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **);
extern void kcf_free_triedlist(kcf_prov_tried_t *);
extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
    kcf_provider_desc_t *, int);
extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
    crypto_key_t *, kcf_mech_entry_t **, int *, kcf_prov_tried_t *,
    crypto_func_group_t, boolean_t, size_t);
extern kcf_provider_desc_t *kcf_get_dual_provider(crypto_mechanism_t *,
    crypto_key_t *, crypto_mechanism_t *, crypto_key_t *,
    kcf_mech_entry_t **, crypto_mech_type_t *,
    crypto_mech_type_t *, int *, kcf_prov_tried_t *,
    crypto_func_group_t, crypto_func_group_t, boolean_t, size_t);
extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
    crypto_session_id_t);
extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,
    crypto_call_req_t *, kcf_req_params_t *, boolean_t);
extern void kcf_sched_init(void);
extern void kcf_sched_start(void);
extern void kcf_sop_done(kcf_sreq_node_t *, int);
extern void kcf_aop_done(kcf_areq_node_t *, int);
extern int common_submit_request(kcf_provider_desc_t *,
    crypto_ctx_t *, kcf_req_params_t *, crypto_req_handle_t);
extern void kcf_free_context(kcf_context_t *);

extern int kcf_svc_wait(int *);
extern int kcf_svc_do_run(void);
extern int kcf_need_signature_verification(kcf_provider_desc_t *);
extern void kcf_verify_signature(void *);
extern struct modctl *kcf_get_modctl(crypto_provider_info_t *);
extern void verify_unverified_providers(void);
extern void kcf_free_req(kcf_areq_node_t *areq);
extern void crypto_bufcall_service(void);

extern void kcf_walk_ntfylist(uint32_t, void *);
extern void kcf_do_notify(kcf_provider_desc_t *, boolean_t);

extern kcf_dual_req_t *kcf_alloc_req(crypto_call_req_t *);
extern void kcf_next_req(void *, int);
extern void kcf_last_req(void *, int);

#ifdef __cplusplus
}
#endif

#endif /* _SYS_CRYPTO_SCHED_IMPL_H */