xref: /linux/include/crypto/hash.h (revision 8a7c601e14576a22c2bbf7f67455ccf3f3d2737f)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Hash: Hash algorithms under the crypto API
4  *
5  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7 
8 #ifndef _CRYPTO_HASH_H
9 #define _CRYPTO_HASH_H
10 
11 #include <linux/crypto.h>
12 #include <linux/scatterlist.h>
13 #include <linux/slab.h>
14 #include <linux/string.h>
15 
16 /* Set this bit for virtual address instead of SG list. */
17 #define CRYPTO_AHASH_REQ_VIRT	0x00000001
18 
19 #define CRYPTO_AHASH_REQ_PRIVATE \
20 	CRYPTO_AHASH_REQ_VIRT
21 
22 struct crypto_ahash;
23 
24 /**
25  * DOC: Message Digest Algorithm Definitions
26  *
27  * These data structures define modular message digest algorithm
28  * implementations, managed via crypto_register_ahash(),
29  * crypto_register_shash(), crypto_unregister_ahash() and
30  * crypto_unregister_shash().
31  */
32 
33 /*
34  * struct hash_alg_common - define properties of message digest
35  * @digestsize: Size of the result of the transformation. A buffer of this size
36  *	        must be available to the @final and @finup calls, so they can
37  *	        store the resulting hash into it. For various predefined sizes,
38  *	        search include/crypto/ using
39  *	        git grep _DIGEST_SIZE include/crypto.
40  * @statesize: Size of the block for partial state of the transformation. A
41  *	       buffer of this size must be passed to the @export function as it
42  *	       will save the partial state of the transformation into it. On the
43  *	       other side, the @import function will load the state from a
44  *	       buffer of this size as well.
45  * @base: Start of data structure of cipher algorithm. The common data
46  *	  structure of crypto_alg contains information common to all ciphers.
47  *	  The hash_alg_common data structure now adds the hash-specific
48  *	  information.
49  */
50 #define HASH_ALG_COMMON {		\
51 	unsigned int digestsize;	\
52 	unsigned int statesize;		\
53 					\
54 	struct crypto_alg base;		\
55 }
56 struct hash_alg_common HASH_ALG_COMMON;
57 
58 struct ahash_request {
59 	struct crypto_async_request base;
60 
61 	unsigned int nbytes;
62 	union {
63 		struct scatterlist *src;
64 		const u8 *svirt;
65 	};
66 	u8 *result;
67 
68 	struct scatterlist sg_head[2];
69 	crypto_completion_t saved_complete;
70 	void *saved_data;
71 
72 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
73 };
74 
75 /**
76  * struct ahash_alg - asynchronous message digest definition
77  * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
78  *	  state of the HASH transformation at the beginning. This shall fill in
79  *	  the internal structures used during the entire duration of the whole
80  *	  transformation. No data processing happens at this point. Driver code
81  *	  implementation must not use req->result.
82  * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
83  *	   function actually pushes blocks of data from upper layers into the
84  *	   driver, which then passes those to the hardware as seen fit. This
85  *	   function must not finalize the HASH transformation by calculating the
86  *	   final message digest as this only adds more data into the
87  *	   transformation. This function shall not modify the transformation
88  *	   context, as this function may be called in parallel with the same
89  *	   transformation object. Data processing can happen synchronously
90  *	   [SHASH] or asynchronously [AHASH] at this point. Driver must not use
91  *	   req->result.
92  *	   For block-only algorithms, @update must return the number
93  *	   of bytes to store in the API partial block buffer.
94  * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
95  *	   transformation and retrieves the resulting hash from the driver and
96  *	   pushes it back to upper layers. No data processing happens at this
97  *	   point unless hardware requires it to finish the transformation
98  *	   (then the data buffered by the device driver is processed).
99  * @finup: **[optional]** Combination of @update and @final. This function is effectively a
100  *	   combination of @update and @final calls issued in sequence. As some
101  *	   hardware cannot do @update and @final separately, this callback was
102  *	   added to allow such hardware to be used at least by IPsec. Data
103  *	   processing can happen synchronously [SHASH] or asynchronously [AHASH]
104  *	   at this point.
105  * @digest: Combination of @init and @update and @final. This function
106  *	    effectively behaves as the entire chain of operations, @init,
107  *	    @update and @final issued in sequence. Just like @finup, this was
108  *	    added for hardware which cannot even do @finup, but can only do
109  *	    the whole transformation in one run. Data processing can happen
110  *	    synchronously [SHASH] or asynchronously [AHASH] at this point.
111  * @setkey: Set optional key used by the hashing algorithm. Intended to push
112  *	    optional key used by the hashing algorithm from upper layers into
113  *	    the driver. This function can store the key in the transformation
114  *	    context or can outright program it into the hardware. In the former
115  *	    case, one must be careful to program the key into the hardware at
116  *	    the appropriate time, and one must be aware that .setkey() can be
117  *	    called multiple times during the existence of the transformation
118  *	    object. Not all hashing algorithms implement this function, as it
119  *	    is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
120  *	    implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
121  *	    this function. This function must be called before any of
122  *	    @init, @update, @final, @finup or @digest is called. No data
123  *	    processing happens at this point.
124  * @export: Export partial state of the transformation. This function dumps the
125  *	    entire state of the ongoing transformation into a provided block of
126  *	    data so it can be @import 'ed back later on. This is useful in case
127  *	    you want to save the partial result of the transformation after
128  *	    processing a certain amount of data and reload that partial result
129  *	    later on, possibly multiple times. No data processing
130  *	    happens at this point. Driver must not use req->result.
131  * @import: Import partial state of the transformation. This function loads the
132  *	    entire state of the ongoing transformation from a provided block of
133  *	    data so the transformation can continue from this point onward. No
134  *	    data processing happens at this point. Driver must not use
135  *	    req->result.
136  * @export_core: Export partial state without partial block.  Only defined
137  *		 for algorithms that are not block-only.
138  * @import_core: Import partial state without partial block.  Only defined
139  *		 for algorithms that are not block-only.
140  * @init_tfm: Initialize the cryptographic transformation object.
141  *	      This function is called only once at the instantiation
142  *	      time, right after the transformation context was
143  *	      allocated. In case the cryptographic hardware has
144  *	      some special requirements which need to be handled
145  *	      by software, this function shall check for the precise
146  *	      requirement of the transformation and put any software
147  *	      fallbacks in place.
148  * @exit_tfm: Deinitialize the cryptographic transformation object.
149  *	      This is a counterpart to @init_tfm, used to remove
150  *	      various changes set in @init_tfm.
151  * @clone_tfm: Copy transform into new object, may allocate memory.
152  * @halg: see struct hash_alg_common
153  */
154 struct ahash_alg {
155 	int (*init)(struct ahash_request *req);
156 	int (*update)(struct ahash_request *req);
157 	int (*final)(struct ahash_request *req);
158 	int (*finup)(struct ahash_request *req);
159 	int (*digest)(struct ahash_request *req);
160 	int (*export)(struct ahash_request *req, void *out);
161 	int (*import)(struct ahash_request *req, const void *in);
162 	int (*export_core)(struct ahash_request *req, void *out);
163 	int (*import_core)(struct ahash_request *req, const void *in);
164 	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
165 		      unsigned int keylen);
166 	int (*init_tfm)(struct crypto_ahash *tfm);
167 	void (*exit_tfm)(struct crypto_ahash *tfm);
168 	int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
169 
170 	struct hash_alg_common halg;
171 };
172 
173 struct shash_desc {
174 	struct crypto_shash *tfm;
175 	void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
176 };
177 
178 #define HASH_MAX_DIGESTSIZE	 64
179 
180 /*
181  * The size of a core hash state and a partial block.  The final byte
182  * is the length of the partial block.
183  */
184 #define HASH_STATE_AND_BLOCK(state, block) ((state) + (block) + 1)
185 
187 /* Worst case is sha3-224. */
188 #define HASH_MAX_STATESIZE	 HASH_STATE_AND_BLOCK(200, 144)
189 
190 /* This needs to match arch/s390/crypto/sha.h. */
191 #define S390_SHA_CTX_SIZE	216
192 
193 /*
194  * Worst case is hmac(sha3-224-s390).  Its context is a nested 'shash_desc'
195  * containing a 'struct s390_sha_ctx'.
196  */
197 #define SHA3_224_S390_DESCSIZE	HASH_STATE_AND_BLOCK(S390_SHA_CTX_SIZE, 144)
198 #define HASH_MAX_DESCSIZE	(sizeof(struct shash_desc) + \
199 				 SHA3_224_S390_DESCSIZE)
200 #define MAX_SYNC_HASH_REQSIZE	(sizeof(struct ahash_request) + \
201 				 HASH_MAX_DESCSIZE)
202 
203 #define SHASH_DESC_ON_STACK(shash, ctx)					     \
204 	char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
205 		__aligned(__alignof__(struct shash_desc));		     \
206 	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
207 
208 #define HASH_REQUEST_ON_STACK(name, _tfm) \
209 	char __##name##_req[sizeof(struct ahash_request) + \
210 			    MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
211 	struct ahash_request *name = \
212 		ahash_request_on_stack_init(__##name##_req, (_tfm))
213 
214 #define HASH_REQUEST_CLONE(name, gfp) \
215 	hash_request_clone(name, sizeof(__##name##_req), gfp)
216 
217 #define CRYPTO_HASH_STATESIZE(coresize, blocksize) (coresize + blocksize + 1)
218 
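/*
 * Example (illustrative sketch, not part of the API proper): one-shot hashing
 * of a linear buffer with an on-stack descriptor.  The shash handle @tfm is
 * assumed to have been allocated beforehand, e.g. with
 * crypto_alloc_shash("sha256", 0, 0); error handling is abbreviated:
 *
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *	int err;
 *
 *	desc->tfm = tfm;
 *	err = crypto_shash_digest(desc, data, len, out);
 *	shash_desc_zero(desc);
 */
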
219 /**
220  * struct shash_alg - synchronous message digest definition
221  * @init: see struct ahash_alg
222  * @update: see struct ahash_alg
223  * @final: see struct ahash_alg
224  * @finup: see struct ahash_alg
225  * @digest: see struct ahash_alg
226  * @export: see struct ahash_alg
227  * @import: see struct ahash_alg
228  * @export_core: see struct ahash_alg
229  * @import_core: see struct ahash_alg
230  * @setkey: see struct ahash_alg
231  * @init_tfm: Initialize the cryptographic transformation object.
232  *	      This function is called only once at the instantiation
233  *	      time, right after the transformation context was
234  *	      allocated. In case the cryptographic hardware has
235  *	      some special requirements which need to be handled
236  *	      by software, this function shall check for the precise
237  *	      requirement of the transformation and put any software
238  *	      fallbacks in place.
239  * @exit_tfm: Deinitialize the cryptographic transformation object.
240  *	      This is a counterpart to @init_tfm, used to remove
241  *	      various changes set in @init_tfm.
242  * @clone_tfm: Copy transform into new object, may allocate memory.
243  * @descsize: Size of the operational state for the message digest. This state
244  * 	      size is the memory size that needs to be allocated for
245  *	      shash_desc.__ctx
246  * @halg: see struct hash_alg_common
247  * @HASH_ALG_COMMON: see struct hash_alg_common
248  */
249 struct shash_alg {
250 	int (*init)(struct shash_desc *desc);
251 	int (*update)(struct shash_desc *desc, const u8 *data,
252 		      unsigned int len);
253 	int (*final)(struct shash_desc *desc, u8 *out);
254 	int (*finup)(struct shash_desc *desc, const u8 *data,
255 		     unsigned int len, u8 *out);
256 	int (*digest)(struct shash_desc *desc, const u8 *data,
257 		      unsigned int len, u8 *out);
258 	int (*export)(struct shash_desc *desc, void *out);
259 	int (*import)(struct shash_desc *desc, const void *in);
260 	int (*export_core)(struct shash_desc *desc, void *out);
261 	int (*import_core)(struct shash_desc *desc, const void *in);
262 	int (*setkey)(struct crypto_shash *tfm, const u8 *key,
263 		      unsigned int keylen);
264 	int (*init_tfm)(struct crypto_shash *tfm);
265 	void (*exit_tfm)(struct crypto_shash *tfm);
266 	int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);
267 
268 	unsigned int descsize;
269 
270 	union {
271 		struct HASH_ALG_COMMON;
272 		struct hash_alg_common halg;
273 	};
274 };
275 #undef HASH_ALG_COMMON
276 
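/*
 * Example (hypothetical sketch): the rough shape of a driver registering a
 * synchronous message digest.  Every "foo" name below is made up purely for
 * illustration, and crypto_register_shash()/crypto_unregister_shash() are
 * declared in <crypto/internal/hash.h>:
 *
 *	static struct shash_alg foo_alg = {
 *		.digestsize	= FOO_DIGEST_SIZE,
 *		.descsize	= sizeof(struct foo_desc_ctx),
 *		.init		= foo_init,
 *		.update		= foo_update,
 *		.final		= foo_final,
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-generic",
 *			.cra_priority	 = 100,
 *			.cra_blocksize	 = FOO_BLOCK_SIZE,
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mod_init(void)
 *	{
 *		return crypto_register_shash(&foo_alg);
 *	}
 *
 *	static void __exit foo_mod_exit(void)
 *	{
 *		crypto_unregister_shash(&foo_alg);
 *	}
 */
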
277 struct crypto_ahash {
278 	bool using_shash; /* Underlying algorithm is shash, not ahash */
279 	unsigned int statesize;
280 	unsigned int reqsize;
281 	struct crypto_tfm base;
282 };
283 
284 struct crypto_shash {
285 	struct crypto_tfm base;
286 };
287 
288 /**
289  * DOC: Asynchronous Message Digest API
290  *
291  * The asynchronous message digest API is used with the ciphers of type
292  * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
293  *
294  * The asynchronous cipher operation discussion provided for the
295  * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
296  */
297 
298 static inline bool ahash_req_on_stack(struct ahash_request *req)
299 {
300 	return crypto_req_on_stack(&req->base);
301 }
302 
303 static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
304 {
305 	return container_of(tfm, struct crypto_ahash, base);
306 }
307 
308 /**
309  * crypto_alloc_ahash() - allocate ahash cipher handle
310  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
311  *	      ahash cipher
312  * @type: specifies the type of the cipher
313  * @mask: specifies the mask for the cipher
314  *
315  * Allocate a cipher handle for an ahash. The returned struct
316  * crypto_ahash is the cipher handle that is required for any subsequent
317  * API invocation for that ahash.
318  *
319  * Return: allocated cipher handle in case of success; IS_ERR() is true in case
320  *	   of an error, PTR_ERR() returns the error code.
321  */
322 struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
323 					u32 mask);
324 
325 struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);
326 
327 static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
328 {
329 	return &tfm->base;
330 }
331 
332 /**
333  * crypto_free_ahash() - zeroize and free the ahash handle
334  * @tfm: cipher handle to be freed
335  *
336  * If @tfm is a NULL or error pointer, this function does nothing.
337  */
338 static inline void crypto_free_ahash(struct crypto_ahash *tfm)
339 {
340 	crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
341 }
342 
343 /**
344  * crypto_has_ahash() - Search for the availability of an ahash.
345  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
346  *	      ahash
347  * @type: specifies the type of the ahash
348  * @mask: specifies the mask for the ahash
349  *
350  * Return: true when the ahash is known to the kernel crypto API; false
351  *	   otherwise
352  */
353 int crypto_has_ahash(const char *alg_name, u32 type, u32 mask);
354 
355 static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
356 {
357 	return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
358 }
359 
360 static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
361 {
362 	return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
363 }
364 
365 /**
366  * crypto_ahash_blocksize() - obtain block size for cipher
367  * @tfm: cipher handle
368  *
369  * The block size for the message digest cipher referenced with the cipher
370  * handle is returned.
371  *
372  * Return: block size of cipher
373  */
374 static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm)
375 {
376 	return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
377 }
378 
379 static inline struct hash_alg_common *__crypto_hash_alg_common(
380 	struct crypto_alg *alg)
381 {
382 	return container_of(alg, struct hash_alg_common, base);
383 }
384 
385 static inline struct hash_alg_common *crypto_hash_alg_common(
386 	struct crypto_ahash *tfm)
387 {
388 	return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
389 }
390 
391 /**
392  * crypto_ahash_digestsize() - obtain message digest size
393  * @tfm: cipher handle
394  *
395  * The size for the message digest created by the message digest cipher
396  * referenced with the cipher handle is returned.
397  *
399  * Return: message digest size of cipher
400  */
401 static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
402 {
403 	return crypto_hash_alg_common(tfm)->digestsize;
404 }
405 
406 /**
407  * crypto_ahash_statesize() - obtain size of the ahash state
408  * @tfm: cipher handle
409  *
410  * Return the size of the ahash state. With the crypto_ahash_export()
411  * function, the caller can export the state into a buffer whose size is
412  * defined with this function.
413  *
414  * Return: size of the ahash state
415  */
416 static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
417 {
418 	return tfm->statesize;
419 }
420 
421 static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
422 {
423 	return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
424 }
425 
426 static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
427 {
428 	crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
429 }
430 
431 static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
432 {
433 	crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
434 }
435 
436 /**
437  * crypto_ahash_reqtfm() - obtain cipher handle from request
438  * @req: asynchronous request handle that contains the reference to the ahash
439  *	 cipher handle
440  *
441  * Return the ahash cipher handle that is registered with the asynchronous
442  * request handle ahash_request.
443  *
444  * Return: ahash cipher handle
445  */
446 static inline struct crypto_ahash *crypto_ahash_reqtfm(
447 	struct ahash_request *req)
448 {
449 	return __crypto_ahash_cast(req->base.tfm);
450 }
451 
452 /**
453  * crypto_ahash_reqsize() - obtain size of the request data structure
454  * @tfm: cipher handle
455  *
456  * Return: size of the request data
457  */
458 static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
459 {
460 	return tfm->reqsize;
461 }
462 
463 static inline void *ahash_request_ctx(struct ahash_request *req)
464 {
465 	return req->__ctx;
466 }
467 
468 /**
469  * crypto_ahash_setkey - set key for cipher handle
470  * @tfm: cipher handle
471  * @key: buffer holding the key
472  * @keylen: length of the key in bytes
473  *
474  * The caller-provided key is set for the ahash cipher. The cipher
475  * handle must point to a keyed hash in order for this function to succeed.
476  *
477  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
478  */
479 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
480 			unsigned int keylen);
481 
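/*
 * Example (sketch): keying a keyed message digest such as HMAC before any
 * hashing is performed.  The algorithm name, @key, @keylen and the
 * out_free_tfm label are illustrative only:
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (err)
 *		goto out_free_tfm;
 */
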
482 /**
483  * crypto_ahash_finup() - update and finalize message digest
484  * @req: reference to the ahash_request handle that holds all information
485  *	 needed to perform the cipher operation
486  *
487  * This function is a "short-hand" for the function calls of
488  * crypto_ahash_update and crypto_ahash_final. The parameters have the same
489  * meaning as discussed for those separate functions.
490  *
491  * Return: see crypto_ahash_final()
492  */
493 int crypto_ahash_finup(struct ahash_request *req);
494 
495 /**
496  * crypto_ahash_final() - calculate message digest
497  * @req: reference to the ahash_request handle that holds all information
498  *	 needed to perform the cipher operation
499  *
500  * Finalize the message digest operation and create the message digest
501  * based on all data added to the cipher handle. The message digest is placed
502  * into the output buffer registered with the ahash_request handle.
503  *
504  * Return:
505  * 0		if the message digest was successfully calculated;
506  * -EINPROGRESS	if data is fed into hardware (DMA) or queued for later;
507  * -EBUSY	if queue is full and request should be resubmitted later;
508  * other < 0	if an error occurred
509  */
510 static inline int crypto_ahash_final(struct ahash_request *req)
511 {
512 	req->nbytes = 0;
513 	return crypto_ahash_finup(req);
514 }
515 
516 /**
517  * crypto_ahash_digest() - calculate message digest for a buffer
518  * @req: reference to the ahash_request handle that holds all information
519  *	 needed to perform the cipher operation
520  *
521  * This function is a "short-hand" for the function calls of crypto_ahash_init,
522  * crypto_ahash_update and crypto_ahash_final. The parameters have the same
523  * meaning as discussed for those separate three functions.
524  *
525  * Return: see crypto_ahash_final()
526  */
527 int crypto_ahash_digest(struct ahash_request *req);
528 
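/*
 * Example (sketch): computing a digest over a linear buffer with the
 * asynchronous API, sleeping until completion.  "sha256", @buf and @len are
 * illustrative; @buf must be a buffer that may legitimately be placed in a
 * scatterlist (i.e. not on the stack).  DECLARE_CRYPTO_WAIT(),
 * crypto_req_done() and crypto_wait_req() come from <linux/crypto.h>, and
 * SHA256_DIGEST_SIZE from <crypto/sha2.h>:
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	if (!req) {
 *		err = -ENOMEM;
 *		goto out_free_tfm;
 *	}
 *
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 * out_free_tfm:
 *	crypto_free_ahash(tfm);
 */
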
529 /**
530  * crypto_ahash_export() - extract current message digest state
531  * @req: reference to the ahash_request handle whose state is exported
532  * @out: output buffer of sufficient size that can hold the hash state
533  *
534  * This function exports the hash state of the ahash_request handle into the
535  * caller-allocated output buffer out which must have sufficient size (e.g. by
536  * calling crypto_ahash_statesize()).
537  *
538  * Return: 0 if the export was successful; < 0 if an error occurred
539  */
540 int crypto_ahash_export(struct ahash_request *req, void *out);
541 
542 /**
543  * crypto_ahash_import() - import message digest state
544  * @req: reference to ahash_request handle the state is imported into
545  * @in: buffer holding the state
546  *
547  * This function imports the hash state into the ahash_request handle from the
548  * input buffer. That buffer should have been generated with the
549  * crypto_ahash_export function.
550  *
551  * Return: 0 if the import was successful; < 0 if an error occurred
552  */
553 int crypto_ahash_import(struct ahash_request *req, const void *in);
554 
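/*
 * Example (sketch): saving and restoring the partial state of an ongoing
 * ahash operation.  @req, @tfm and @wait are assumed to be set up as in the
 * crypto_ahash_digest() example above; the state buffer size comes from
 * crypto_ahash_statesize():
 *
 *	u8 *state;
 *
 *	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 *
 *	err = crypto_wait_req(crypto_ahash_init(req), &wait);
 *	err = crypto_wait_req(crypto_ahash_update(req), &wait);
 *	err = crypto_ahash_export(req, state);
 *
 *	(later, the saved state can be loaded into a request for the same tfm)
 *	err = crypto_ahash_import(req, state);
 *	err = crypto_wait_req(crypto_ahash_finup(req), &wait);
 */
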
555 /**
556  * crypto_ahash_init() - (re)initialize message digest handle
557  * @req: ahash_request handle that already is initialized with all necessary
558  *	 data using the ahash_request_* API functions
559  *
560  * The call (re-)initializes the message digest referenced by the ahash_request
561  * handle. Any potentially existing state created by previous operations is
562  * discarded.
563  *
564  * Return: see crypto_ahash_final()
565  */
566 int crypto_ahash_init(struct ahash_request *req);
567 
568 /**
569  * crypto_ahash_update() - add data to message digest for processing
570  * @req: ahash_request handle that was previously initialized with the
571  *	 crypto_ahash_init call.
572  *
573  * Updates the message digest state of the &ahash_request handle. The input data
574  * is pointed to by the scatter/gather list registered in the &ahash_request
575  *	 handle.
576  *
577  * Return: see crypto_ahash_final()
578  */
579 int crypto_ahash_update(struct ahash_request *req);
580 
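/*
 * Example (sketch): incremental hashing with the asynchronous API.  The
 * request is assumed to have crypto_req_done()/&wait installed as its
 * completion callback (see ahash_request_set_callback()); @sg1, @sg2 and the
 * lengths are illustrative:
 *
 *	err = crypto_wait_req(crypto_ahash_init(req), &wait);
 *
 *	ahash_request_set_crypt(req, sg1, NULL, len1);
 *	err = crypto_wait_req(crypto_ahash_update(req), &wait);
 *
 *	ahash_request_set_crypt(req, sg2, digest, len2);
 *	err = crypto_wait_req(crypto_ahash_finup(req), &wait);
 */
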
581 /**
582  * DOC: Asynchronous Hash Request Handle
583  *
584  * The &ahash_request data structure contains all pointers to data
585  * required for the asynchronous cipher operation. This includes the cipher
586  * handle (which can be used by multiple &ahash_request instances), pointer
587  * to plaintext and the message digest output buffer, asynchronous callback
588  * function, etc. It acts as a handle to the ahash_request_* API calls in a
589  * similar way as the ahash handle does for the crypto_ahash_* API calls.
590  */
591 
592 /**
593  * ahash_request_set_tfm() - update cipher handle reference in request
594  * @req: request handle to be modified
595  * @tfm: cipher handle that shall be added to the request handle
596  *
597  * Allow the caller to replace the existing ahash handle in the request
598  * data structure with a different one.
599  */
600 static inline void ahash_request_set_tfm(struct ahash_request *req,
601 					 struct crypto_ahash *tfm)
602 {
603 	crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
604 }
605 
606 /**
607  * ahash_request_alloc() - allocate request data structure
608  * @tfm: cipher handle to be registered with the request
609  * @gfp: memory allocation flag that is handed to kmalloc by the API call.
610  *
611  * Allocate the request data structure that must be used with the ahash
612  * message digest API calls. During the allocation, the provided ahash
613  * handle is registered in the request data structure.
615  *
616  * Return: allocated request handle in case of success, or NULL if out of memory
617  */
618 static inline struct ahash_request *ahash_request_alloc_noprof(
619 	struct crypto_ahash *tfm, gfp_t gfp)
620 {
621 	struct ahash_request *req;
622 
623 	req = kmalloc_noprof(sizeof(struct ahash_request) +
624 			     crypto_ahash_reqsize(tfm), gfp);
625 
626 	if (likely(req))
627 		ahash_request_set_tfm(req, tfm);
628 
629 	return req;
630 }
631 #define ahash_request_alloc(...)	alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__))
632 
633 /**
634  * ahash_request_free() - zeroize and free the request data structure
635  * @req: request data structure cipher handle to be freed
636  */
637 void ahash_request_free(struct ahash_request *req);
638 
639 static inline void ahash_request_zero(struct ahash_request *req)
640 {
641 	memzero_explicit(req, sizeof(*req) +
642 			      crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
643 }
644 
645 static inline struct ahash_request *ahash_request_cast(
646 	struct crypto_async_request *req)
647 {
648 	return container_of(req, struct ahash_request, base);
649 }
650 
651 /**
652  * ahash_request_set_callback() - set asynchronous callback function
653  * @req: request handle
654  * @flags: specify zero or an ORing of the flags
655  *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
656  *	   increase the wait queue beyond the initial maximum size;
657  *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
658  * @compl: callback function pointer to be registered with the request handle
659  * @data: The data pointer refers to memory that is not used by the kernel
660  *	  crypto API, but provided to the callback function for it to use. Here,
661  *	  the caller can provide a reference to memory the callback function can
662  *	  operate on. As the callback function is invoked asynchronously to the
663  *	  related functionality, it may need to access data structures of the
664  *	  related functionality which can be referenced using this pointer. The
665  *	  callback function can access the memory via the "data" field in the
666  *	  &crypto_async_request data structure provided to the callback function.
667  *
668  * This function allows setting the callback function that is triggered once
669  * the cipher operation completes.
670  *
671  * The callback function is registered with the &ahash_request handle and
672  * must comply with the following template::
673  *
674  *	void callback_function(struct crypto_async_request *req, int error)
675  */
676 static inline void ahash_request_set_callback(struct ahash_request *req,
677 					      u32 flags,
678 					      crypto_completion_t compl,
679 					      void *data)
680 {
681 	flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
682 	flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
683 	crypto_request_set_callback(&req->base, flags, compl, data);
684 }
685 
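/*
 * Example (sketch): the common pattern of letting the caller sleep until an
 * asynchronous request completes, built on the generic wait helpers
 * DECLARE_CRYPTO_WAIT(), crypto_req_done() and crypto_wait_req() from
 * <linux/crypto.h>:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				   crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */
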
686 /**
687  * ahash_request_set_crypt() - set data buffers
688  * @req: ahash_request handle to be updated
689  * @src: source scatter/gather list
690  * @result: buffer that is filled with the message digest -- the caller must
691  *	    ensure that the buffer has sufficient space by, for example, calling
692  *	    crypto_ahash_digestsize()
693  * @nbytes: number of bytes to process from the source scatter/gather list
694  *
695  * By using this call, the caller references the source scatter/gather list.
696  * The source scatter/gather list points to the data the message digest is to
697  * be calculated for.
698  */
699 static inline void ahash_request_set_crypt(struct ahash_request *req,
700 					   struct scatterlist *src, u8 *result,
701 					   unsigned int nbytes)
702 {
703 	req->src = src;
704 	req->nbytes = nbytes;
705 	req->result = result;
706 	req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT;
707 }
708 
709 /**
710  * ahash_request_set_virt() - set virtual address data buffers
711  * @req: ahash_request handle to be updated
712  * @src: source virtual address
713  * @result: buffer that is filled with the message digest -- the caller must
714  *	    ensure that the buffer has sufficient space by, for example, calling
715  *	    crypto_ahash_digestsize()
716  * @nbytes: number of bytes to process from the source virtual address
717  *
718  * By using this call, the caller references the source virtual address.
719  * The source virtual address points to the data the message digest is to
720  * be calculated for.
721  */
722 static inline void ahash_request_set_virt(struct ahash_request *req,
723 					  const u8 *src, u8 *result,
724 					  unsigned int nbytes)
725 {
726 	req->svirt = src;
727 	req->nbytes = nbytes;
728 	req->result = result;
729 	req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
730 }
731 
732 /**
733  * DOC: Synchronous Message Digest API
734  *
735  * The synchronous message digest API is used with the ciphers of type
736  * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
737  *
738  * The message digest API is able to maintain state information for the
739  * caller.
740  *
741  * The synchronous message digest API can store user-related context in its
742  * shash_desc request data structure.
743  */
744 
745 /**
746  * crypto_alloc_shash() - allocate message digest handle
747  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
748  *	      message digest cipher
749  * @type: specifies the type of the cipher
750  * @mask: specifies the mask for the cipher
751  *
752  * Allocate a cipher handle for a message digest. The returned &struct
753  * crypto_shash is the cipher handle that is required for any subsequent
754  * API invocation for that message digest.
755  *
756  * Return: allocated cipher handle in case of success; IS_ERR() is true in case
757  *	   of an error, PTR_ERR() returns the error code.
758  */
759 struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
760 					u32 mask);
761 
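/*
 * Example (sketch): one-shot hashing of a linear buffer without managing a
 * shash_desc by hand.  "sha256" is illustrative and SHA256_DIGEST_SIZE comes
 * from <crypto/sha2.h>:
 *
 *	struct crypto_shash *tfm;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_shash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_shash_tfm_digest(tfm, data, len, digest);
 *	crypto_free_shash(tfm);
 */
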
762 struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);
763 
764 int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
765 
766 static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
767 {
768 	return &tfm->base;
769 }
770 
771 /**
772  * crypto_free_shash() - zeroize and free the message digest handle
773  * @tfm: cipher handle to be freed
774  *
775  * If @tfm is a NULL or error pointer, this function does nothing.
776  */
777 static inline void crypto_free_shash(struct crypto_shash *tfm)
778 {
779 	crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
780 }
781 
782 static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm)
783 {
784 	return crypto_tfm_alg_name(crypto_shash_tfm(tfm));
785 }
786 
787 static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
788 {
789 	return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
790 }
791 
792 /**
793  * crypto_shash_blocksize() - obtain block size for cipher
794  * @tfm: cipher handle
795  *
796  * The block size for the message digest cipher referenced with the cipher
797  * handle is returned.
798  *
799  * Return: block size of cipher
800  */
801 static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
802 {
803 	return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
804 }
805 
806 static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
807 {
808 	return container_of(alg, struct shash_alg, base);
809 }
810 
811 static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
812 {
813 	return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
814 }
815 
816 /**
817  * crypto_shash_digestsize() - obtain message digest size
818  * @tfm: cipher handle
819  *
820  * The size for the message digest created by the message digest cipher
821  * referenced with the cipher handle is returned.
822  *
823  * Return: digest size of cipher
824  */
825 static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
826 {
827 	return crypto_shash_alg(tfm)->digestsize;
828 }
829 
830 static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
831 {
832 	return crypto_shash_alg(tfm)->statesize;
833 }
834 
835 static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
836 {
837 	return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
838 }
839 
840 static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
841 {
842 	crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
843 }
844 
845 static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
846 {
847 	crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
848 }
849 
850 /**
851  * crypto_shash_descsize() - obtain the operational state size
852  * @tfm: cipher handle
853  *
854  * The size of the operational state the cipher needs during operation is
855  * returned for the hash referenced with the cipher handle. This size is
856  * required to calculate the memory requirements so that the caller can
857  * allocate sufficient memory for the operational state.
858  *
859  * The operational state is defined with struct shash_desc where the size of
860  * that data structure is to be calculated as
861  * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
862  *
863  * Return: size of the operational state
864  */
865 static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
866 {
867 	return crypto_shash_alg(tfm)->descsize;
868 }
869 
870 static inline void *shash_desc_ctx(struct shash_desc *desc)
871 {
872 	return desc->__ctx;
873 }
874 
875 /**
876  * crypto_shash_setkey() - set key for message digest
877  * @tfm: cipher handle
878  * @key: buffer holding the key
879  * @keylen: length of the key in bytes
880  *
881  * The caller-provided key is set for the keyed message digest cipher. The
882  * cipher handle must point to a keyed message digest cipher in order for this
883  * function to succeed.
884  *
885  * Context: Softirq or process context.
886  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
887  */
888 int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
889 			unsigned int keylen);
890 
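/*
 * Example (sketch): computing an HMAC with the synchronous API.  The
 * algorithm name, @key/@keylen and @data/@len are illustrative:
 *
 *	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_shash_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_shash_tfm_digest(tfm, data, len, mac);
 *
 *	crypto_free_shash(tfm);
 */
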
891 /**
892  * crypto_shash_digest() - calculate message digest for buffer
893  * @desc: see crypto_shash_final()
894  * @data: see crypto_shash_update()
895  * @len: see crypto_shash_update()
896  * @out: see crypto_shash_final()
897  *
898  * This function is a "short-hand" for the function calls of crypto_shash_init,
899  * crypto_shash_update and crypto_shash_final. The parameters have the same
900  * meaning as discussed for those separate three functions.
901  *
902  * Context: Softirq or process context.
903  * Return: 0 if the message digest creation was successful; < 0 if an error
904  *	   occurred
905  */
906 int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
907 			unsigned int len, u8 *out);
908 
909 /**
910  * crypto_shash_tfm_digest() - calculate message digest for buffer
911  * @tfm: hash transformation object
912  * @data: see crypto_shash_update()
913  * @len: see crypto_shash_update()
914  * @out: see crypto_shash_final()
915  *
916  * This is a simplified version of crypto_shash_digest() for users who don't
917  * want to allocate their own hash descriptor (shash_desc).  Instead,
918  * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash)
919  * directly, and it allocates a hash descriptor on the stack internally.
920  * Note that this stack allocation may be fairly large.
921  *
922  * Context: Softirq or process context.
923  * Return: 0 on success; < 0 if an error occurred.
924  */
925 int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
926 			    unsigned int len, u8 *out);
927 
928 int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
929 		       unsigned int len, u8 *out);
930 
931 /**
932  * crypto_shash_export() - extract operational state for message digest
933  * @desc: reference to the operational state handle whose state is exported
934  * @out: output buffer of sufficient size that can hold the hash state
935  *
936  * This function exports the hash state of the operational state handle into the
937  * caller-allocated output buffer @out, which must be of sufficient size (e.g.
938  * as returned by crypto_shash_statesize()).
939  *
940  * Context: Softirq or process context.
941  * Return: 0 if the export creation was successful; < 0 if an error occurred
942  */
943 int crypto_shash_export(struct shash_desc *desc, void *out);
944 
945 /**
946  * crypto_shash_import() - import operational state
947  * @desc: reference to the operational state handle the state is imported into
948  * @in: buffer holding the state
949  *
950  * This function imports the hash state into the operational state handle from
951  * the input buffer. That buffer should have been generated with the
952  * crypto_shash_export function.
953  *
954  * Context: Softirq or process context.
955  * Return: 0 if the import was successful; < 0 if an error occurred
956  */
957 int crypto_shash_import(struct shash_desc *desc, const void *in);
958 
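/*
 * Example (sketch): checkpointing a partially hashed message with the
 * synchronous API.  HASH_MAX_STATESIZE is an upper bound; the exact size for
 * a given transform is crypto_shash_statesize().  @desc, the data fragments
 * and the lengths are illustrative:
 *
 *	u8 state[HASH_MAX_STATESIZE];
 *
 *	err = crypto_shash_init(desc);
 *	err = crypto_shash_update(desc, part1, len1);
 *	err = crypto_shash_export(desc, state);
 *
 *	(later, on a descriptor for the same transform)
 *	err = crypto_shash_import(desc, state);
 *	err = crypto_shash_finup(desc, part2, len2, digest);
 */
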
959 /**
960  * crypto_shash_init() - (re)initialize message digest
961  * @desc: operational state handle that is already filled
962  *
963  * The call (re-)initializes the message digest referenced by the
964  * operational state handle. Any potentially existing state created by
965  * previous operations is discarded.
966  *
967  * Context: Softirq or process context.
968  * Return: 0 if the message digest initialization was successful; < 0 if an
969  *	   error occurred
970  */
971 int crypto_shash_init(struct shash_desc *desc);
972 
973 /**
974  * crypto_shash_finup() - calculate message digest of buffer
975  * @desc: see crypto_shash_final()
976  * @data: see crypto_shash_update()
977  * @len: see crypto_shash_update()
978  * @out: see crypto_shash_final()
979  *
980  * This function is a "short-hand" for the function calls of
981  * crypto_shash_update and crypto_shash_final. The parameters have the same
982  * meaning as discussed for those separate functions.
983  *
984  * Context: Softirq or process context.
985  * Return: 0 if the message digest creation was successful; < 0 if an error
986  *	   occurred
987  */
988 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
989 		       unsigned int len, u8 *out);
990 
991 /**
992  * crypto_shash_update() - add data to message digest for processing
993  * @desc: operational state handle that is already initialized
994  * @data: input data to be added to the message digest
995  * @len: length of the input data
996  *
997  * Updates the message digest state of the operational state handle.
998  *
999  * Context: Softirq or process context.
1000  * Return: 0 if the message digest update was successful; < 0 if an error
1001  *	   occurred
1002  */
1003 static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data,
1004 				      unsigned int len)
1005 {
1006 	return crypto_shash_finup(desc, data, len, NULL);
1007 }
1008 
1009 /**
1010  * crypto_shash_final() - calculate message digest
1011  * @desc: operational state handle that is already filled with data
1012  * @out: output buffer filled with the message digest
1013  *
1014  * Finalize the message digest operation and create the message digest
1015  * based on all data added to the cipher handle. The message digest is placed
1016  * into the output buffer. The caller must ensure that the output buffer is
1017  * large enough by using crypto_shash_digestsize.
1018  *
1019  * Context: Softirq or process context.
1020  * Return: 0 if the message digest creation was successful; < 0 if an error
1021  *	   occurred
1022  */
1023 static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
1024 {
1025 	return crypto_shash_finup(desc, NULL, 0, out);
1026 }
1027 
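/*
 * Example (sketch): incremental hashing with the synchronous API, feeding two
 * fragments before producing the digest.  @desc is assumed to be set up as in
 * the SHASH_DESC_ON_STACK() example earlier in this header; the buffer names
 * are illustrative:
 *
 *	err = crypto_shash_init(desc);
 *	if (!err)
 *		err = crypto_shash_update(desc, hdr, hdr_len);
 *	if (!err)
 *		err = crypto_shash_finup(desc, payload, payload_len, digest);
 */
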
1028 static inline void shash_desc_zero(struct shash_desc *desc)
1029 {
1030 	memzero_explicit(desc,
1031 			 sizeof(*desc) + crypto_shash_descsize(desc->tfm));
1032 }
1033 
1034 static inline bool ahash_is_async(struct crypto_ahash *tfm)
1035 {
1036 	return crypto_tfm_is_async(&tfm->base);
1037 }
1038 
1039 static inline struct ahash_request *ahash_request_on_stack_init(
1040 	char *buf, struct crypto_ahash *tfm)
1041 {
1042 	struct ahash_request *req = (void *)buf;
1043 
1044 	crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm));
1045 	return req;
1046 }
1047 
1048 static inline struct ahash_request *ahash_request_clone(
1049 	struct ahash_request *req, size_t total, gfp_t gfp)
1050 {
1051 	return container_of(crypto_request_clone(&req->base, total, gfp),
1052 			    struct ahash_request, base);
1053 }
1054 
1055 #endif	/* _CRYPTO_HASH_H */
1056