/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Hash: Hash algorithms under the crypto API
 *
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H

#include <linux/atomic.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Set this bit for virtual address instead of SG list. */
#define CRYPTO_AHASH_REQ_VIRT	0x00000001

/* Mask of request flag bits that are internal to the ahash API. */
#define CRYPTO_AHASH_REQ_PRIVATE \
	CRYPTO_AHASH_REQ_VIRT

struct crypto_ahash;

/**
 * DOC: Message Digest Algorithm Definitions
 *
 * These data structures define modular message digest algorithm
 * implementations, managed via crypto_register_ahash(),
 * crypto_register_shash(), crypto_unregister_ahash() and
 * crypto_unregister_shash().
 */

/*
 * struct hash_alg_common - define properties of message digest
 * @digestsize: Size of the result of the transformation. A buffer of this size
 *	        must be available to the @final and @finup calls, so they can
 *	        store the resulting hash into it. For various predefined sizes,
 *	        search include/crypto/ using
 *	        git grep _DIGEST_SIZE include/crypto.
 * @statesize: Size of the block for partial state of the transformation. A
 *	       buffer of this size must be passed to the @export function as it
 *	       will save the partial state of the transformation into it. On the
 *	       other side, the @import function will load the state from a
 *	       buffer of this size as well.
 * @base: Start of data structure of cipher algorithm. The common data
 *	  structure of crypto_alg contains information common to all ciphers.
 *	  The hash_alg_common data structure now adds the hash-specific
 *	  information.
 */
#define HASH_ALG_COMMON {		\
	unsigned int digestsize;	\
	unsigned int statesize;		\
					\
	struct crypto_alg base;		\
}
struct hash_alg_common HASH_ALG_COMMON;

/*
 * struct ahash_request - asynchronous hash request
 *
 * The input is either an SG list or a linear buffer; which member of the
 * union is valid is selected by the CRYPTO_AHASH_REQ_VIRT bit in base.flags
 * (see ahash_request_set_crypt()/ahash_request_set_virt() below).
 */
struct ahash_request {
	struct crypto_async_request base;

	unsigned int nbytes;
	union {
		struct scatterlist *src;	/* SG-list input (default) */
		const u8 *svirt;		/* linear input, if CRYPTO_AHASH_REQ_VIRT set */
	};
	u8 *result;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct ahash_alg - asynchronous message digest definition
 * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
 *	  state of the HASH transformation at the beginning. This shall fill in
 *	  the internal structures used during the entire duration of the whole
 *	  transformation. No data processing happens at this point. Driver code
 *	  implementation must not use req->result.
 * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
 *	   function actually pushes blocks of data from upper layers into the
 *	   driver, which then passes those to the hardware as seen fit. This
 *	   function must not finalize the HASH transformation by calculating the
 *	   final message digest as this only adds more data into the
 *	   transformation. This function shall not modify the transformation
 *	   context, as this function may be called in parallel with the same
 *	   transformation object. Data processing can happen synchronously
 *	   [SHASH] or asynchronously [AHASH] at this point. Driver must not use
 *	   req->result.
 *	   For block-only algorithms, @update must return the number
 *	   of bytes to store in the API partial block buffer.
 * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
 *	   transformation and retrieves the resulting hash from the driver and
 *	   pushes it back to upper layers.
 *	   No data processing happens at this
 *	   point unless hardware requires it to finish the transformation
 *	   (then the data buffered by the device driver is processed).
 * @finup: **[optional]** Combination of @update and @final. This function is effectively a
 *	   combination of @update and @final calls issued in sequence. As some
 *	   hardware cannot do @update and @final separately, this callback was
 *	   added to allow such hardware to be used at least by IPsec. Data
 *	   processing can happen synchronously [SHASH] or asynchronously [AHASH]
 *	   at this point.
 * @digest: Combination of @init and @update and @final. This function
 *	    effectively behaves as the entire chain of operations, @init,
 *	    @update and @final issued in sequence. Just like @finup, this was
 *	    added for hardware which cannot do even the @finup, but can only do
 *	    the whole transformation in one run. Data processing can happen
 *	    synchronously [SHASH] or asynchronously [AHASH] at this point.
 * @setkey: Set optional key used by the hashing algorithm. Intended to push
 *	    optional key used by the hashing algorithm from upper layers into
 *	    the driver. This function can store the key in the transformation
 *	    context or can outright program it into the hardware. In the former
 *	    case, one must be careful to program the key into the hardware at
 *	    appropriate time and one must be careful that .setkey() can be
 *	    called multiple times during the existence of the transformation
 *	    object. Not all hashing algorithms do implement this function as it
 *	    is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
 *	    implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
 *	    this function. This function must be called before any other of the
 *	    @init, @update, @final, @finup, @digest is called. No data
 *	    processing happens at this point.
 * @export: Export partial state of the transformation. This function dumps the
 *	    entire state of the ongoing transformation into a provided block of
 *	    data so it can be @import'ed back later on. This is useful in case
 *	    you want to save partial result of the transformation after
 *	    processing certain amount of data and reload this partial result
 *	    multiple times later on for multiple re-use. No data processing
 *	    happens at this point. Driver must not use req->result.
 * @import: Import partial state of the transformation. This function loads the
 *	    entire state of the ongoing transformation from a provided block of
 *	    data so the transformation can continue from this point onward. No
 *	    data processing happens at this point. Driver must not use
 *	    req->result.
 * @init_tfm: Initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation
 *	      time, right after the transformation context was
 *	      allocated. In case the cryptographic hardware has
 *	      some special requirements which need to be handled
 *	      by software, this function shall check for the precise
 *	      requirement of the transformation and put any software
 *	      fallbacks in place.
 * @exit_tfm: Deinitialize the cryptographic transformation object.
 *	      This is a counterpart to @init_tfm, used to remove
 *	      various changes set in @init_tfm.
 * @clone_tfm: Copy transform into new object, may allocate memory.
144 * @halg: see struct hash_alg_common 145 */ 146 struct ahash_alg { 147 int (*init)(struct ahash_request *req); 148 int (*update)(struct ahash_request *req); 149 int (*final)(struct ahash_request *req); 150 int (*finup)(struct ahash_request *req); 151 int (*digest)(struct ahash_request *req); 152 int (*export)(struct ahash_request *req, void *out); 153 int (*import)(struct ahash_request *req, const void *in); 154 int (*setkey)(struct crypto_ahash *tfm, const u8 *key, 155 unsigned int keylen); 156 int (*init_tfm)(struct crypto_ahash *tfm); 157 void (*exit_tfm)(struct crypto_ahash *tfm); 158 int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src); 159 160 struct hash_alg_common halg; 161 }; 162 163 struct shash_desc { 164 struct crypto_shash *tfm; 165 void *__ctx[] __aligned(ARCH_SLAB_MINALIGN); 166 }; 167 168 #define HASH_MAX_DIGESTSIZE 64 169 170 /* Worst case is sha3-224. */ 171 #define HASH_MAX_STATESIZE 200 + 144 + 1 172 173 /* 174 * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc' 175 * containing a 'struct s390_sha_ctx'. 
 */
#define HASH_MAX_DESCSIZE	(sizeof(struct shash_desc) + 360)
#define MAX_SYNC_HASH_REQSIZE	HASH_MAX_DESCSIZE

/*
 * Declare an on-stack shash_desc large enough for any shash algorithm.
 * NOTE(review): the "ctx" argument appears unused by the expansion --
 * presumably retained for API compatibility; confirm before removing.
 */
#define SHASH_DESC_ON_STACK(shash, ctx)					     \
	char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
		__aligned(__alignof__(struct shash_desc)); \
	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc

/*
 * Declare an on-stack ahash_request large enough for any synchronous hash.
 * HASH_REQUEST_CLONE relies on the __##name##_req array declared here to
 * recover the request's total size via sizeof.
 */
#define HASH_REQUEST_ON_STACK(name, _tfm) \
	char __##name##_req[sizeof(struct ahash_request) + \
			    MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
	struct ahash_request *name = \
		ahash_request_on_stack_init(__##name##_req, (_tfm))

#define HASH_REQUEST_CLONE(name, gfp) \
	hash_request_clone(name, sizeof(__##name##_req), gfp)

/**
 * struct shash_alg - synchronous message digest definition
 * @init: see struct ahash_alg
 * @update: see struct ahash_alg
 * @final: see struct ahash_alg
 * @finup: see struct ahash_alg
 * @digest: see struct ahash_alg
 * @export: see struct ahash_alg
 * @import: see struct ahash_alg
 * @setkey: see struct ahash_alg
 * @init_tfm: Initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation
 *	      time, right after the transformation context was
 *	      allocated. In case the cryptographic hardware has
 *	      some special requirements which need to be handled
 *	      by software, this function shall check for the precise
 *	      requirement of the transformation and put any software
 *	      fallbacks in place.
 * @exit_tfm: Deinitialize the cryptographic transformation object.
 *	      This is a counterpart to @init_tfm, used to remove
 *	      various changes set in @init_tfm.
 * @clone_tfm: Copy transform into new object, may allocate memory.
 * @descsize: Size of the operational state for the message digest.
 *	      This state
 *	      size is the memory size that needs to be allocated for
 *	      shash_desc.__ctx
 * @halg: see struct hash_alg_common
 * @HASH_ALG_COMMON: see struct hash_alg_common
 */
struct shash_alg {
	int (*init)(struct shash_desc *desc);
	int (*update)(struct shash_desc *desc, const u8 *data,
		      unsigned int len);
	int (*final)(struct shash_desc *desc, u8 *out);
	int (*finup)(struct shash_desc *desc, const u8 *data,
		     unsigned int len, u8 *out);
	int (*digest)(struct shash_desc *desc, const u8 *data,
		      unsigned int len, u8 *out);
	int (*export)(struct shash_desc *desc, void *out);
	int (*import)(struct shash_desc *desc, const void *in);
	int (*setkey)(struct crypto_shash *tfm, const u8 *key,
		      unsigned int keylen);
	int (*init_tfm)(struct crypto_shash *tfm);
	void (*exit_tfm)(struct crypto_shash *tfm);
	int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);

	unsigned int descsize;

	/*
	 * Expose the common fields both anonymously (digestsize, statesize,
	 * base directly on shash_alg) and as the named member halg; the
	 * HASH_ALG_COMMON macro guarantees the two layouts are identical.
	 */
	union {
		struct HASH_ALG_COMMON;
		struct hash_alg_common halg;
	};
};
#undef HASH_ALG_COMMON

struct crypto_ahash {
	bool using_shash; /* Underlying algorithm is shash, not ahash */
	unsigned int statesize;
	unsigned int reqsize;
	struct crypto_tfm base;
};

struct crypto_shash {
	struct crypto_tfm base;
};

/**
 * DOC: Asynchronous Message Digest API
 *
 * The asynchronous message digest API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
 *
 * The asynchronous cipher operation discussion provided for the
 * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
 */

static inline bool ahash_req_on_stack(struct ahash_request *req)
{
	return crypto_req_on_stack(&req->base);
}

/* Map a generic crypto_tfm back to its enclosing crypto_ahash handle. */
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_ahash, base);
}

/**
 * crypto_alloc_ahash() - allocate ahash cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ahash cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for an ahash. The returned struct
 * crypto_ahash is the cipher handle that is required for any subsequent
 * API invocation for that ahash.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask);

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);

static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ahash() - zeroize and free the ahash handle
 * @tfm: cipher handle to be freed
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
	crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
}

/**
 * crypto_has_ahash() - Search for the availability of an ahash.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ahash
 * @type: specifies the type of the ahash
 * @mask: specifies the mask for the ahash
 *
 * Return: true when the ahash is known to the kernel crypto API; false
 *	   otherwise
 */
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask);

static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
}

static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
}

/**
 * crypto_ahash_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the message digest cipher referenced with the cipher
 * handle is returned.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
}

static inline struct hash_alg_common *__crypto_hash_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct hash_alg_common, base);
}

static inline struct hash_alg_common *crypto_hash_alg_common(
	struct crypto_ahash *tfm)
{
	return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
}

/**
 * crypto_ahash_digestsize() - obtain message digest size
 * @tfm: cipher handle
 *
 * The size for the message digest created by the message digest cipher
 * referenced with the cipher handle is returned.
 *
 * Return: message digest size of cipher
 */
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
	return crypto_hash_alg_common(tfm)->digestsize;
}

/**
 * crypto_ahash_statesize() - obtain size of the ahash state
 * @tfm: cipher handle
 *
 * Return the size of the ahash state. With the crypto_ahash_export()
 * function, the caller can export the state into a buffer whose size is
 * defined with this function.
 *
 * Return: size of the ahash state
 */
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
	/* Cached on the handle rather than read from the algorithm. */
	return tfm->statesize;
}

static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
{
	return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
}

static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
{
	crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
}

static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
{
	crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}

/**
 * crypto_ahash_reqtfm() - obtain cipher handle from request
 * @req: asynchronous request handle that contains the reference to the ahash
 *	 cipher handle
 *
 * Return the ahash cipher handle that is registered with the asynchronous
 * request handle ahash_request.
 *
 * Return: ahash cipher handle
 */
static inline struct crypto_ahash *crypto_ahash_reqtfm(
	struct ahash_request *req)
{
	return __crypto_ahash_cast(req->base.tfm);
}

/**
 * crypto_ahash_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: size of the request data
 */
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
	return tfm->reqsize;
}

/* Return the algorithm's private context area within the request. */
static inline void *ahash_request_ctx(struct ahash_request *req)
{
	return req->__ctx;
}

/**
 * crypto_ahash_setkey - set key for cipher handle
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ahash cipher. The cipher
 * handle must point to a keyed hash in order for this function to succeed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen);

/**
 * crypto_ahash_finup() - update and finalize message digest
 * @req: reference to the ahash_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * This function is a "short-hand" for the function calls of
 * crypto_ahash_update and crypto_ahash_final. The parameters have the same
 * meaning as discussed for those separate functions.
 *
 * Return: see crypto_ahash_final()
 */
int crypto_ahash_finup(struct ahash_request *req);

/**
 * crypto_ahash_final() - calculate message digest
 * @req: reference to the ahash_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Finalize the message digest operation and create the message digest
 * based on all data added to the cipher handle. The message digest is placed
 * into the output buffer registered with the ahash_request handle.
 *
 * Return:
 * 0		if the message digest was successfully calculated;
 * -EINPROGRESS	if data is fed into hardware (DMA) or queued for later;
 * -EBUSY	if queue is full and request should be resubmitted later;
 * other < 0	if an error occurred
 */
int crypto_ahash_final(struct ahash_request *req);

/**
 * crypto_ahash_digest() - calculate message digest for a buffer
 * @req: reference to the ahash_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * This function is a "short-hand" for the function calls of crypto_ahash_init,
 * crypto_ahash_update and crypto_ahash_final. The parameters have the same
 * meaning as discussed for those separate three functions.
 *
 * Return: see crypto_ahash_final()
 */
int crypto_ahash_digest(struct ahash_request *req);

/**
 * crypto_ahash_export() - extract current message digest state
 * @req: reference to the ahash_request handle whose state is exported
 * @out: output buffer of sufficient size that can hold the hash state
 *
 * This function exports the hash state of the ahash_request handle into the
 * caller-allocated output buffer out which must have sufficient size (e.g. by
 * calling crypto_ahash_statesize()).
 *
 * Return: 0 if the export was successful; < 0 if an error occurred
 */
int crypto_ahash_export(struct ahash_request *req, void *out);

/**
 * crypto_ahash_export_core() - extract core state for message digest
 * @req: reference to the ahash_request handle whose state is exported
 * @out: output buffer of sufficient size that can hold the hash state
 *
 * Export the hash state without the partial block buffer.
 *
 * Context: Softirq or process context.
 * Return: 0 if the export creation was successful; < 0 if an error occurred
 */
int crypto_ahash_export_core(struct ahash_request *req, void *out);

/**
 * crypto_ahash_import() - import message digest state
 * @req: reference to ahash_request handle the state is imported into
 * @in: buffer holding the state
 *
 * This function imports the hash state into the ahash_request handle from the
 * input buffer. That buffer should have been generated with the
 * crypto_ahash_export function.
 *
 * Return: 0 if the import was successful; < 0 if an error occurred
 */
int crypto_ahash_import(struct ahash_request *req, const void *in);

/**
 * crypto_ahash_import_core() - import core state
 * @req: reference to ahash_request handle the state is imported into
 * @in: buffer holding the state
 *
 * Import the hash state without the partial block buffer.
 *
 * Context: Softirq or process context.
 * Return: 0 if the import was successful; < 0 if an error occurred
 */
int crypto_ahash_import_core(struct ahash_request *req, const void *in);

/**
 * crypto_ahash_init() - (re)initialize message digest handle
 * @req: ahash_request handle that already is initialized with all necessary
 *	 data using the ahash_request_* API functions
 *
 * The call (re-)initializes the message digest referenced by the ahash_request
 * handle. Any potentially existing state created by previous operations is
 * discarded.
 *
 * Return: see crypto_ahash_final()
 */
int crypto_ahash_init(struct ahash_request *req);

/**
 * crypto_ahash_update() - add data to message digest for processing
 * @req: ahash_request handle that was previously initialized with the
 *	 crypto_ahash_init call.
 *
 * Updates the message digest state of the &ahash_request handle.
 * The input data
 * is pointed to by the scatter/gather list registered in the &ahash_request
 * handle
 *
 * Return: see crypto_ahash_final()
 */
int crypto_ahash_update(struct ahash_request *req);

/**
 * DOC: Asynchronous Hash Request Handle
 *
 * The &ahash_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple &ahash_request instances), pointer
 * to plaintext and the message digest output buffer, asynchronous callback
 * function, etc. It acts as a handle to the ahash_request_* API calls in a
 * similar way as ahash handle to the crypto_ahash_* API calls.
 */

/**
 * ahash_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ahash handle in the request
 * data structure with a different one.
 */
static inline void ahash_request_set_tfm(struct ahash_request *req,
					 struct crypto_ahash *tfm)
{
	crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
}

/**
 * ahash_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ahash
 * message digest API calls. During the allocation, the provided ahash handle
 * is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ahash_request *ahash_request_alloc_noprof(
	struct crypto_ahash *tfm, gfp_t gfp)
{
	struct ahash_request *req;

	/* Allocation covers the request header plus the tfm's context area. */
	req = kmalloc_noprof(sizeof(struct ahash_request) +
			     crypto_ahash_reqsize(tfm), gfp);

	if (likely(req))
		ahash_request_set_tfm(req, tfm);

	return req;
}
#define ahash_request_alloc(...)	alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__))

/**
 * ahash_request_free() - zeroize and free the request data structure
 * @req: request data structure cipher handle to be freed
 */
void ahash_request_free(struct ahash_request *req);

/* Clear the request including the tfm's private context area. */
static inline void ahash_request_zero(struct ahash_request *req)
{
	memzero_explicit(req, sizeof(*req) +
			      crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
}

static inline struct ahash_request *ahash_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ahash_request, base);
}

/**
 * ahash_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  &crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once
 * the cipher operation completes.
 *
 * The callback function is registered with the &ahash_request handle and
 * must comply with the following template::
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ahash_request_set_callback(struct ahash_request *req,
					      u32 flags,
					      crypto_completion_t compl,
					      void *data)
{
	/*
	 * Strip any API-internal bits from the caller's flags, then carry
	 * over the internal bits already recorded on the request so they
	 * survive this call.
	 */
	flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
	flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
	crypto_request_set_callback(&req->base, flags, compl, data);
}

/**
 * ahash_request_set_crypt() - set data buffers
 * @req: ahash_request handle to be updated
 * @src: source scatter/gather list
 * @result: buffer that is filled with the message digest -- the caller must
 *	    ensure that the buffer has sufficient space by, for example, calling
 *	    crypto_ahash_digestsize()
 * @nbytes: number of bytes to process from the source scatter/gather list
 *
 * By using this call, the caller references the source scatter/gather list.
 * The source scatter/gather list points to the data the message digest is to
 * be calculated for.
 */
static inline void ahash_request_set_crypt(struct ahash_request *req,
					   struct scatterlist *src, u8 *result,
					   unsigned int nbytes)
{
	req->src = src;
	req->nbytes = nbytes;
	req->result = result;
	/* Clearing the flag marks the union as holding an SG list. */
	req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT;
}

/**
 * ahash_request_set_virt() - set virtual address data buffers
 * @req: ahash_request handle to be updated
 * @src: source virtual address
 * @result: buffer that is filled with the message digest -- the caller must
 *	    ensure that the buffer has sufficient space by, for example, calling
 *	    crypto_ahash_digestsize()
 * @nbytes: number of bytes to process from the source virtual address
 *
 * By using this call, the caller references the source virtual address.
 * The source virtual address points to the data the message digest is to
 * be calculated for.
 */
static inline void ahash_request_set_virt(struct ahash_request *req,
					  const u8 *src, u8 *result,
					  unsigned int nbytes)
{
	req->svirt = src;
	req->nbytes = nbytes;
	req->result = result;
	/* Setting the flag marks the union as holding a virtual address. */
	req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}

/**
 * DOC: Synchronous Message Digest API
 *
 * The synchronous message digest API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
 *
 * The message digest API is able to maintain state information for the
 * caller.
 *
 * The synchronous message digest API can store user-related context in its
 * shash_desc request data structure.
 */

/**
 * crypto_alloc_shash() - allocate message digest handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      message digest cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a message digest. The returned &struct
 * crypto_shash is the cipher handle that is required for any subsequent
 * API invocation for that message digest.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
					u32 mask);

struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);

int crypto_has_shash(const char *alg_name, u32 type, u32 mask);

static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_shash() - zeroize and free the message digest handle
 * @tfm: cipher handle to be freed
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
	crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
}

static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm)
{
	return crypto_tfm_alg_name(crypto_shash_tfm(tfm));
}

static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
}

/**
 * crypto_shash_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the message digest cipher referenced with the cipher
 * handle is returned.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
}

static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct shash_alg, base);
}

static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
{
	return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
}

/**
 * crypto_shash_digestsize() - obtain message digest size
 * @tfm: cipher handle
 *
 * The size for the message digest created by the message digest cipher
 * referenced with the cipher handle is returned.
 *
 * Return: digest size of cipher
 */
static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
{
	return crypto_shash_alg(tfm)->digestsize;
}

static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
{
	return crypto_shash_alg(tfm)->statesize;
}

static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
{
	return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
}

static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
{
	crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
}

static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
{
	crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
}

/**
 * crypto_shash_descsize() - obtain the operational state size
 * @tfm: cipher handle
 *
 * The size of the operational state the cipher needs during operation is
 * returned for the hash referenced with the cipher handle. This size is
 * required to calculate the memory requirements to allow the caller allocating
 * sufficient memory for operational state.
849 * 850 * The operational state is defined with struct shash_desc where the size of 851 * that data structure is to be calculated as 852 * sizeof(struct shash_desc) + crypto_shash_descsize(alg) 853 * 854 * Return: size of the operational state 855 */ 856 static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) 857 { 858 return crypto_shash_alg(tfm)->descsize; 859 } 860 861 static inline void *shash_desc_ctx(struct shash_desc *desc) 862 { 863 return desc->__ctx; 864 } 865 866 /** 867 * crypto_shash_setkey() - set key for message digest 868 * @tfm: cipher handle 869 * @key: buffer holding the key 870 * @keylen: length of the key in bytes 871 * 872 * The caller provided key is set for the keyed message digest cipher. The 873 * cipher handle must point to a keyed message digest cipher in order for this 874 * function to succeed. 875 * 876 * Context: Softirq or process context. 877 * Return: 0 if the setting of the key was successful; < 0 if an error occurred 878 */ 879 int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, 880 unsigned int keylen); 881 882 /** 883 * crypto_shash_digest() - calculate message digest for buffer 884 * @desc: see crypto_shash_final() 885 * @data: see crypto_shash_update() 886 * @len: see crypto_shash_update() 887 * @out: see crypto_shash_final() 888 * 889 * This function is a "short-hand" for the function calls of crypto_shash_init, 890 * crypto_shash_update and crypto_shash_final. The parameters have the same 891 * meaning as discussed for those separate three functions. 892 * 893 * Context: Softirq or process context. 
894 * Return: 0 if the message digest creation was successful; < 0 if an error 895 * occurred 896 */ 897 int crypto_shash_digest(struct shash_desc *desc, const u8 *data, 898 unsigned int len, u8 *out); 899 900 /** 901 * crypto_shash_tfm_digest() - calculate message digest for buffer 902 * @tfm: hash transformation object 903 * @data: see crypto_shash_update() 904 * @len: see crypto_shash_update() 905 * @out: see crypto_shash_final() 906 * 907 * This is a simplified version of crypto_shash_digest() for users who don't 908 * want to allocate their own hash descriptor (shash_desc). Instead, 909 * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash) 910 * directly, and it allocates a hash descriptor on the stack internally. 911 * Note that this stack allocation may be fairly large. 912 * 913 * Context: Softirq or process context. 914 * Return: 0 on success; < 0 if an error occurred. 915 */ 916 int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, 917 unsigned int len, u8 *out); 918 919 int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data, 920 unsigned int len, u8 *out); 921 922 /** 923 * crypto_shash_export() - extract operational state for message digest 924 * @desc: reference to the operational state handle whose state is exported 925 * @out: output buffer of sufficient size that can hold the hash state 926 * 927 * This function exports the hash state of the operational state handle into the 928 * caller-allocated output buffer out which must have sufficient size (e.g. by 929 * calling crypto_shash_descsize). 930 * 931 * Context: Softirq or process context. 
932 * Return: 0 if the export creation was successful; < 0 if an error occurred 933 */ 934 int crypto_shash_export(struct shash_desc *desc, void *out); 935 936 /** 937 * crypto_shash_export_core() - extract core state for message digest 938 * @desc: reference to the operational state handle whose state is exported 939 * @out: output buffer of sufficient size that can hold the hash state 940 * 941 * Export the hash state without the partial block buffer. 942 * 943 * Context: Softirq or process context. 944 * Return: 0 if the export creation was successful; < 0 if an error occurred 945 */ 946 int crypto_shash_export_core(struct shash_desc *desc, void *out); 947 948 /** 949 * crypto_shash_import() - import operational state 950 * @desc: reference to the operational state handle the state imported into 951 * @in: buffer holding the state 952 * 953 * This function imports the hash state into the operational state handle from 954 * the input buffer. That buffer should have been generated with the 955 * crypto_ahash_export function. 956 * 957 * Context: Softirq or process context. 958 * Return: 0 if the import was successful; < 0 if an error occurred 959 */ 960 int crypto_shash_import(struct shash_desc *desc, const void *in); 961 962 /** 963 * crypto_shash_import_core() - import core state 964 * @desc: reference to the operational state handle the state imported into 965 * @in: buffer holding the state 966 * 967 * Import the hash state without the partial block buffer. 968 * 969 * Context: Softirq or process context. 970 * Return: 0 if the import was successful; < 0 if an error occurred 971 */ 972 int crypto_shash_import_core(struct shash_desc *desc, const void *in); 973 974 /** 975 * crypto_shash_init() - (re)initialize message digest 976 * @desc: operational state handle that is already filled 977 * 978 * The call (re-)initializes the message digest referenced by the 979 * operational state handle. 
Any potentially existing state created by 980 * previous operations is discarded. 981 * 982 * Context: Softirq or process context. 983 * Return: 0 if the message digest initialization was successful; < 0 if an 984 * error occurred 985 */ 986 int crypto_shash_init(struct shash_desc *desc); 987 988 /** 989 * crypto_shash_finup() - calculate message digest of buffer 990 * @desc: see crypto_shash_final() 991 * @data: see crypto_shash_update() 992 * @len: see crypto_shash_update() 993 * @out: see crypto_shash_final() 994 * 995 * This function is a "short-hand" for the function calls of 996 * crypto_shash_update and crypto_shash_final. The parameters have the same 997 * meaning as discussed for those separate functions. 998 * 999 * Context: Softirq or process context. 1000 * Return: 0 if the message digest creation was successful; < 0 if an error 1001 * occurred 1002 */ 1003 int crypto_shash_finup(struct shash_desc *desc, const u8 *data, 1004 unsigned int len, u8 *out); 1005 1006 /** 1007 * crypto_shash_update() - add data to message digest for processing 1008 * @desc: operational state handle that is already initialized 1009 * @data: input data to be added to the message digest 1010 * @len: length of the input data 1011 * 1012 * Updates the message digest state of the operational state handle. 1013 * 1014 * Context: Softirq or process context. 1015 * Return: 0 if the message digest update was successful; < 0 if an error 1016 * occurred 1017 */ 1018 static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data, 1019 unsigned int len) 1020 { 1021 return crypto_shash_finup(desc, data, len, NULL); 1022 } 1023 1024 /** 1025 * crypto_shash_final() - calculate message digest 1026 * @desc: operational state handle that is already filled with data 1027 * @out: output buffer filled with the message digest 1028 * 1029 * Finalize the message digest operation and create the message digest 1030 * based on all data added to the cipher handle. 
The message digest is placed 1031 * into the output buffer. The caller must ensure that the output buffer is 1032 * large enough by using crypto_shash_digestsize. 1033 * 1034 * Context: Softirq or process context. 1035 * Return: 0 if the message digest creation was successful; < 0 if an error 1036 * occurred 1037 */ 1038 static inline int crypto_shash_final(struct shash_desc *desc, u8 *out) 1039 { 1040 return crypto_shash_finup(desc, NULL, 0, out); 1041 } 1042 1043 static inline void shash_desc_zero(struct shash_desc *desc) 1044 { 1045 memzero_explicit(desc, 1046 sizeof(*desc) + crypto_shash_descsize(desc->tfm)); 1047 } 1048 1049 static inline bool ahash_is_async(struct crypto_ahash *tfm) 1050 { 1051 return crypto_tfm_is_async(&tfm->base); 1052 } 1053 1054 static inline struct ahash_request *ahash_request_on_stack_init( 1055 char *buf, struct crypto_ahash *tfm) 1056 { 1057 struct ahash_request *req = (void *)buf; 1058 1059 crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm)); 1060 return req; 1061 } 1062 1063 static inline struct ahash_request *ahash_request_clone( 1064 struct ahash_request *req, size_t total, gfp_t gfp) 1065 { 1066 return container_of(crypto_request_clone(&req->base, total, gfp), 1067 struct ahash_request, base); 1068 } 1069 1070 #endif /* _CRYPTO_HASH_H */ 1071