/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Hash: Hash algorithms under the crypto API
 *
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Set this bit for virtual address instead of SG list. */
#define CRYPTO_AHASH_REQ_VIRT	0x00000001

#define CRYPTO_AHASH_REQ_PRIVATE \
	CRYPTO_AHASH_REQ_VIRT

struct crypto_ahash;

/**
 * DOC: Message Digest Algorithm Definitions
 *
 * These data structures define modular message digest algorithm
 * implementations, managed via crypto_register_ahash(),
 * crypto_register_shash(), crypto_unregister_ahash() and
 * crypto_unregister_shash().
 */

/*
 * struct hash_alg_common - define properties of message digest
 * @digestsize: Size of the result of the transformation. A buffer of this size
 *	        must be available to the @final and @finup calls, so they can
 *	        store the resulting hash into it. For various predefined sizes,
 *	        search include/crypto/ using
 *	        git grep _DIGEST_SIZE include/crypto.
 * @statesize: Size of the block for partial state of the transformation. A
 *	       buffer of this size must be passed to the @export function as it
 *	       will save the partial state of the transformation into it. On the
 *	       other side, the @import function will load the state from a
 *	       buffer of this size as well.
 * @base: Start of data structure of cipher algorithm. The common data
 *	  structure of crypto_alg contains information common to all ciphers.
 *	  The hash_alg_common data structure now adds the hash-specific
 *	  information.
 */
#define HASH_ALG_COMMON {		\
	unsigned int digestsize;	\
	unsigned int statesize;		\
					\
	struct crypto_alg base;		\
}
struct hash_alg_common HASH_ALG_COMMON;
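
/*
 * For reference, the HASH_ALG_COMMON macro above simply stamps out the same
 * member list in two places, so the declaration directly above expands to:
 *
 *	struct hash_alg_common {
 *		unsigned int digestsize;
 *		unsigned int statesize;
 *
 *		struct crypto_alg base;
 *	};
 *
 * The same macro is reused further below inside 'struct shash_alg', which
 * keeps the anonymous struct in its union layout-identical to the
 * 'struct hash_alg_common halg' member it is unioned with.
 */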

struct ahash_request {
	struct crypto_async_request base;

	unsigned int nbytes;
	union {
		struct scatterlist *src;
		const u8 *svirt;
	};
	u8 *result;

	struct scatterlist sg_head[2];
	crypto_completion_t saved_complete;
	void *saved_data;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct ahash_alg - asynchronous message digest definition
 * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
 *	  state of the HASH transformation at the beginning. This shall fill in
 *	  the internal structures used during the entire duration of the whole
 *	  transformation. No data processing happens at this point. Driver code
 *	  implementation must not use req->result.
 * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
 *	   function actually pushes blocks of data from upper layers into the
 *	   driver, which then passes those to the hardware as seen fit. This
 *	   function must not finalize the HASH transformation by calculating the
 *	   final message digest as this only adds more data into the
 *	   transformation. This function shall not modify the transformation
 *	   context, as this function may be called in parallel with the same
 *	   transformation object. Data processing can happen synchronously
 *	   [SHASH] or asynchronously [AHASH] at this point. Driver must not use
 *	   req->result.
 *	   For block-only algorithms, @update must return the number
 *	   of bytes to store in the API partial block buffer.
 * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
 *	   transformation and retrieves the resulting hash from the driver and
 *	   pushes it back to upper layers. No data processing happens at this
 *	   point unless hardware requires it to finish the transformation
 *	   (then the data buffered by the device driver is processed).
 * @finup: **[optional]** Combination of @update and @final. This function is effectively a
 *	   combination of @update and @final calls issued in sequence. As some
 *	   hardware cannot do @update and @final separately, this callback was
 *	   added to allow such hardware to be used at least by IPsec. Data
 *	   processing can happen synchronously [SHASH] or asynchronously [AHASH]
 *	   at this point.
 * @digest: Combination of @init and @update and @final. This function
 *	    effectively behaves as the entire chain of operations, @init,
 *	    @update and @final issued in sequence. Just like @finup, this was
 *	    added for hardware which cannot do even the @finup, but can only do
 *	    the whole transformation in one run. Data processing can happen
 *	    synchronously [SHASH] or asynchronously [AHASH] at this point.
 * @setkey: Set optional key used by the hashing algorithm. Intended to push
 *	    optional key used by the hashing algorithm from upper layers into
 *	    the driver. This function can store the key in the transformation
 *	    context or can outright program it into the hardware. In the former
 *	    case, one must be careful to program the key into the hardware at
 *	    appropriate time and one must be careful that .setkey() can be
 *	    called multiple times during the existence of the transformation
 *	    object. Not all hashing algorithms implement this function, as it
 *	    is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
 *	    implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
 *	    this function. This function must be called before any of the
 *	    @init, @update, @final, @finup or @digest functions are called. No
 *	    data processing happens at this point.
 * @export: Export partial state of the transformation. This function dumps the
 *	    entire state of the ongoing transformation into a provided block of
 *	    data so it can be @import 'ed back later on. This is useful in case
 *	    you want to save the partial result of the transformation after
 *	    processing a certain amount of data and reload this partial result
 *	    later on, possibly multiple times, for re-use. No data processing
 *	    happens at this point. Driver must not use req->result.
 * @import: Import partial state of the transformation. This function loads the
 *	    entire state of the ongoing transformation from a provided block of
 *	    data so the transformation can continue from this point onward. No
 *	    data processing happens at this point. Driver must not use
 *	    req->result.
 * @export_core: Export partial state without partial block.  Only defined
 *		 for algorithms that are not block-only.
 * @import_core: Import partial state without partial block.  Only defined
 *		 for algorithms that are not block-only.
 * @init_tfm: Initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation
 *	      time, right after the transformation context was
 *	      allocated. In case the cryptographic hardware has
 *	      some special requirements which need to be handled
 *	      by software, this function shall check for the precise
 *	      requirement of the transformation and put any software
 *	      fallbacks in place.
 * @exit_tfm: Deinitialize the cryptographic transformation object.
 *	      This is a counterpart to @init_tfm, used to remove
 *	      various changes set in @init_tfm.
 * @clone_tfm: Copy transform into new object, may allocate memory.
 * @halg: see struct hash_alg_common
 */
struct ahash_alg {
	int (*init)(struct ahash_request *req);
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int (*digest)(struct ahash_request *req);
	int (*export)(struct ahash_request *req, void *out);
	int (*import)(struct ahash_request *req, const void *in);
	int (*export_core)(struct ahash_request *req, void *out);
	int (*import_core)(struct ahash_request *req, const void *in);
	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
		      unsigned int keylen);
	int (*init_tfm)(struct crypto_ahash *tfm);
	void (*exit_tfm)(struct crypto_ahash *tfm);
	int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);

	struct hash_alg_common halg;
};

struct shash_desc {
	struct crypto_shash *tfm;
	void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
};

#define HASH_MAX_DIGESTSIZE	 64

/* Worst case is sha3-224. */
#define HASH_MAX_STATESIZE	(200 + 144 + 1)

/*
 * Worst case is hmac(sha3-224-s390).  Its context is a nested 'shash_desc'
 * containing a 'struct s390_sha_ctx'.
 */
#define HASH_MAX_DESCSIZE	(sizeof(struct shash_desc) + 360)
#define MAX_SYNC_HASH_REQSIZE	(sizeof(struct ahash_request) + \
				 HASH_MAX_DESCSIZE)

#define SHASH_DESC_ON_STACK(shash, ctx)					     \
	char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
		__aligned(__alignof__(struct shash_desc));		     \
	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
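
/*
 * Example (illustrative sketch, not part of the API): one-shot digest using a
 * stack-allocated descriptor.  @out must provide at least
 * crypto_shash_digestsize(tfm) bytes; the function name and error handling
 * are sample choices only.
 *
 *	static int example_shash_digest(struct crypto_shash *tfm,
 *					const u8 *data, unsigned int len,
 *					u8 *out)
 *	{
 *		SHASH_DESC_ON_STACK(desc, tfm);
 *		int err;
 *
 *		desc->tfm = tfm;
 *		err = crypto_shash_digest(desc, data, len, out);
 *		shash_desc_zero(desc);
 *		return err;
 *	}
 */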

#define HASH_REQUEST_ON_STACK(name, _tfm) \
	char __##name##_req[sizeof(struct ahash_request) + \
			    MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
	struct ahash_request *name = \
		ahash_request_on_stack_init(__##name##_req, (_tfm))

#define HASH_REQUEST_CLONE(name, gfp) \
	hash_request_clone(name, sizeof(__##name##_req), gfp)

#define CRYPTO_HASH_STATESIZE(coresize, blocksize) (coresize + blocksize + 1)
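
/*
 * Example (illustrative sketch only): hashing a linearly mapped buffer with
 * an on-stack request.  This assumes @tfm is a synchronous ahash (e.g.
 * allocated with CRYPTO_ALG_ASYNC in the mask) and that @out holds at least
 * crypto_ahash_digestsize(tfm) bytes.
 *
 *	static int example_hash_buf(struct crypto_ahash *tfm,
 *				    const u8 *data, unsigned int len, u8 *out)
 *	{
 *		HASH_REQUEST_ON_STACK(req, tfm);
 *
 *		ahash_request_set_virt(req, data, out, len);
 *		return crypto_ahash_digest(req);
 *	}
 *
 * The crypto_hash_digest() helper declared further below offers a similar
 * one-shot, linear-buffer interface without an explicit request.
 */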

/**
 * struct shash_alg - synchronous message digest definition
 * @init: see struct ahash_alg
 * @update: see struct ahash_alg
 * @final: see struct ahash_alg
 * @finup: see struct ahash_alg
 * @digest: see struct ahash_alg
 * @export: see struct ahash_alg
 * @import: see struct ahash_alg
 * @export_core: see struct ahash_alg
 * @import_core: see struct ahash_alg
 * @setkey: see struct ahash_alg
 * @init_tfm: Initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation
 *	      time, right after the transformation context was
 *	      allocated. In case the cryptographic hardware has
 *	      some special requirements which need to be handled
 *	      by software, this function shall check for the precise
 *	      requirement of the transformation and put any software
 *	      fallbacks in place.
 * @exit_tfm: Deinitialize the cryptographic transformation object.
 *	      This is a counterpart to @init_tfm, used to remove
 *	      various changes set in @init_tfm.
 * @clone_tfm: Copy transform into new object, may allocate memory.
 * @descsize: Size of the operational state for the message digest. This state
 *	      size is the memory size that needs to be allocated for
 *	      shash_desc.__ctx
 * @halg: see struct hash_alg_common
 * @HASH_ALG_COMMON: see struct hash_alg_common
 */
struct shash_alg {
	int (*init)(struct shash_desc *desc);
	int (*update)(struct shash_desc *desc, const u8 *data,
		      unsigned int len);
	int (*final)(struct shash_desc *desc, u8 *out);
	int (*finup)(struct shash_desc *desc, const u8 *data,
		     unsigned int len, u8 *out);
	int (*digest)(struct shash_desc *desc, const u8 *data,
		      unsigned int len, u8 *out);
	int (*export)(struct shash_desc *desc, void *out);
	int (*import)(struct shash_desc *desc, const void *in);
	int (*export_core)(struct shash_desc *desc, void *out);
	int (*import_core)(struct shash_desc *desc, const void *in);
	int (*setkey)(struct crypto_shash *tfm, const u8 *key,
		      unsigned int keylen);
	int (*init_tfm)(struct crypto_shash *tfm);
	void (*exit_tfm)(struct crypto_shash *tfm);
	int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);

	unsigned int descsize;

	union {
		struct HASH_ALG_COMMON;
		struct hash_alg_common halg;
	};
};
#undef HASH_ALG_COMMON

struct crypto_ahash {
	bool using_shash; /* Underlying algorithm is shash, not ahash */
	unsigned int statesize;
	unsigned int reqsize;
	struct crypto_tfm base;
};

struct crypto_shash {
	struct crypto_tfm base;
};

/**
 * DOC: Asynchronous Message Digest API
 *
 * The asynchronous message digest API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
 *
 * The asynchronous cipher operation discussion provided for the
 * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
 */
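
/*
 * Typical (illustrative) calling sequence for the asynchronous API; "sha256",
 * the function name and the error handling are sample choices only.  @data
 * must be addressable by a scatterlist (not stack or vmalloc memory) and
 * @out must hold crypto_ahash_digestsize(tfm) bytes.
 *
 *	#include <crypto/hash.h>
 *	#include <linux/scatterlist.h>
 *
 *	static int example_ahash_digest(const void *data, unsigned int len,
 *					u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int err;
 *
 *		tfm = crypto_alloc_ahash("sha256", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			err = -ENOMEM;
 *			goto out_free_tfm;
 *		}
 *
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					   CRYPTO_TFM_REQ_MAY_SLEEP,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *
 *		err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *		ahash_request_free(req);
 *	out_free_tfm:
 *		crypto_free_ahash(tfm);
 *		return err;
 *	}
 */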

static inline bool ahash_req_on_stack(struct ahash_request *req)
{
	return crypto_req_on_stack(&req->base);
}

static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_ahash, base);
}

/**
 * crypto_alloc_ahash() - allocate ahash cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ahash cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for an ahash. The returned struct
 * crypto_ahash is the cipher handle that is required for any subsequent
 * API invocation for that ahash.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask);

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);

static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ahash() - zeroize and free the ahash handle
 * @tfm: cipher handle to be freed
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
	crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
}

/**
 * crypto_has_ahash() - Search for the availability of an ahash.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ahash
 * @type: specifies the type of the ahash
 * @mask: specifies the mask for the ahash
 *
 * Return: true when the ahash is known to the kernel crypto API; false
 *	   otherwise
 */
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask);

static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
}

static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
}

/**
 * crypto_ahash_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the message digest cipher referenced with the cipher
 * handle is returned.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
}

static inline struct hash_alg_common *__crypto_hash_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct hash_alg_common, base);
}

static inline struct hash_alg_common *crypto_hash_alg_common(
	struct crypto_ahash *tfm)
{
	return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
}

/**
 * crypto_ahash_digestsize() - obtain message digest size
 * @tfm: cipher handle
 *
 * The size for the message digest created by the message digest cipher
 * referenced with the cipher handle is returned.
 *
 * Return: message digest size of cipher
 */
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
	return crypto_hash_alg_common(tfm)->digestsize;
}

/**
 * crypto_ahash_statesize() - obtain size of the ahash state
 * @tfm: cipher handle
 *
 * Return the size of the ahash state. With the crypto_ahash_export()
 * function, the caller can export the state into a buffer whose size is
 * defined with this function.
 *
 * Return: size of the ahash state
 */
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
	return tfm->statesize;
}

static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
{
	return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
}

static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
{
	crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
}

static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
{
	crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}

/**
 * crypto_ahash_reqtfm() - obtain cipher handle from request
 * @req: asynchronous request handle that contains the reference to the ahash
 *	 cipher handle
 *
 * Return the ahash cipher handle that is registered with the asynchronous
 * request handle ahash_request.
 *
 * Return: ahash cipher handle
 */
static inline struct crypto_ahash *crypto_ahash_reqtfm(
	struct ahash_request *req)
{
	return __crypto_ahash_cast(req->base.tfm);
}

/**
 * crypto_ahash_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: size of the request data
 */
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
	return tfm->reqsize;
}

static inline void *ahash_request_ctx(struct ahash_request *req)
{
	return req->__ctx;
}

/**
 * crypto_ahash_setkey - set key for cipher handle
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ahash cipher. The cipher
 * handle must point to a keyed hash in order for this function to succeed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen);
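
/*
 * Example (illustrative): keyed hashing, e.g. with an HMAC transform.  The key
 * must be set on the tfm before requests are processed; "hmac(sha256)" and the
 * error handling shown here are sample choices only.
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (err)
 *		goto out_free_tfm;
 *
 *	... allocate a request and proceed with crypto_ahash_digest() etc. ...
 */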

/**
 * crypto_ahash_finup() - update and finalize message digest
 * @req: reference to the ahash_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * This function is a "short-hand" for the function calls of
 * crypto_ahash_update and crypto_ahash_final. The parameters have the same
 * meaning as discussed for those separate functions.
 *
 * Return: see crypto_ahash_final()
 */
int crypto_ahash_finup(struct ahash_request *req);

/**
 * crypto_ahash_final() - calculate message digest
 * @req: reference to the ahash_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Finalize the message digest operation and create the message digest
 * based on all data added to the cipher handle. The message digest is placed
 * into the output buffer registered with the ahash_request handle.
 *
 * Return:
 * 0		if the message digest was successfully calculated;
 * -EINPROGRESS	if data is fed into hardware (DMA) or queued for later;
 * -EBUSY	if queue is full and request should be resubmitted later;
 * other < 0	if an error occurred
 */
static inline int crypto_ahash_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return crypto_ahash_finup(req);
}

/**
 * crypto_ahash_digest() - calculate message digest for a buffer
 * @req: reference to the ahash_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * This function is a "short-hand" for the function calls of crypto_ahash_init,
 * crypto_ahash_update and crypto_ahash_final. The parameters have the same
 * meaning as discussed for those separate three functions.
 *
 * Return: see crypto_ahash_final()
 */
int crypto_ahash_digest(struct ahash_request *req);

/**
 * crypto_ahash_export() - extract current message digest state
 * @req: reference to the ahash_request handle whose state is exported
 * @out: output buffer of sufficient size that can hold the hash state
 *
 * This function exports the hash state of the ahash_request handle into the
 * caller-allocated output buffer out which must have sufficient size (e.g. by
 * calling crypto_ahash_statesize()).
 *
 * Return: 0 if the export was successful; < 0 if an error occurred
 */
int crypto_ahash_export(struct ahash_request *req, void *out);

/**
 * crypto_ahash_import() - import message digest state
 * @req: reference to ahash_request handle the state is imported into
 * @in: buffer holding the state
 *
 * This function imports the hash state into the ahash_request handle from the
 * input buffer. That buffer should have been generated with the
 * crypto_ahash_export function.
 *
 * Return: 0 if the import was successful; < 0 if an error occurred
 */
int crypto_ahash_import(struct ahash_request *req, const void *in);
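
/*
 * Example (illustrative): checkpointing a long-running hash computation.  The
 * state buffer must be crypto_ahash_statesize(tfm) bytes; the wait/req objects
 * follow the usual crypto_wait_req() pattern and are assumptions of this
 * sketch.
 *
 *	u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *
 *	if (!state)
 *		return -ENOMEM;
 *
 *	err = crypto_wait_req(crypto_ahash_update(req), &wait);
 *	if (!err)
 *		err = crypto_wait_req(crypto_ahash_export(req, state), &wait);
 *
 *	... later, on a request for the same algorithm ...
 *
 *	if (!err)
 *		err = crypto_wait_req(crypto_ahash_import(req2, state), &wait);
 *	kfree_sensitive(state);
 */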

/**
 * crypto_ahash_init() - (re)initialize message digest handle
 * @req: ahash_request handle that already is initialized with all necessary
 *	 data using the ahash_request_* API functions
 *
 * The call (re-)initializes the message digest referenced by the ahash_request
 * handle. Any potentially existing state created by previous operations is
 * discarded.
 *
 * Return: see crypto_ahash_final()
 */
int crypto_ahash_init(struct ahash_request *req);

/**
 * crypto_ahash_update() - add data to message digest for processing
 * @req: ahash_request handle that was previously initialized with the
 *	 crypto_ahash_init call.
 *
 * Updates the message digest state of the &ahash_request handle. The input data
 * is pointed to by the scatter/gather list registered in the &ahash_request
 * handle.
 *
 * Return: see crypto_ahash_final()
 */
int crypto_ahash_update(struct ahash_request *req);
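
/*
 * Example (illustrative) multi-step flow.  Each call may return -EINPROGRESS
 * or -EBUSY on an asynchronous tfm, which crypto_wait_req() absorbs; sg1/sg2
 * stand in for caller-provided scatterlists and digest for the output buffer.
 *
 *	err = crypto_wait_req(crypto_ahash_init(req), &wait);
 *	if (err)
 *		goto out;
 *
 *	ahash_request_set_crypt(req, sg1, NULL, len1);
 *	err = crypto_wait_req(crypto_ahash_update(req), &wait);
 *	if (err)
 *		goto out;
 *
 *	ahash_request_set_crypt(req, sg2, digest, len2);
 *	err = crypto_wait_req(crypto_ahash_finup(req), &wait);
 */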

/**
 * DOC: Asynchronous Hash Request Handle
 *
 * The &ahash_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple &ahash_request instances), a pointer
 * to the plaintext, the message digest output buffer, the asynchronous
 * callback function, etc. It acts as a handle to the ahash_request_* API
 * calls in a similar way as the ahash handle does to the crypto_ahash_* API
 * calls.
 */

/**
 * ahash_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ahash handle in the request
 * data structure with a different one.
 */
static inline void ahash_request_set_tfm(struct ahash_request *req,
					 struct crypto_ahash *tfm)
{
	crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
}

/**
 * ahash_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ahash
 * message digest API calls. During the allocation, the provided ahash handle
 * is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ahash_request *ahash_request_alloc_noprof(
	struct crypto_ahash *tfm, gfp_t gfp)
{
	struct ahash_request *req;

	req = kmalloc_noprof(sizeof(struct ahash_request) +
			     crypto_ahash_reqsize(tfm), gfp);

	if (likely(req))
		ahash_request_set_tfm(req, tfm);

	return req;
}
#define ahash_request_alloc(...)	alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__))

/**
 * ahash_request_free() - zeroize and free the request data structure
 * @req: request data structure cipher handle to be freed
 */
void ahash_request_free(struct ahash_request *req);

static inline void ahash_request_zero(struct ahash_request *req)
{
	memzero_explicit(req, sizeof(*req) +
			      crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
}

static inline struct ahash_request *ahash_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ahash_request, base);
}

/**
 * ahash_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  &crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once
 * the cipher operation completes.
 *
 * The callback function is registered with the &ahash_request handle and
 * must comply with the following template::
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ahash_request_set_callback(struct ahash_request *req,
					      u32 flags,
					      crypto_completion_t compl,
					      void *data)
{
	flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
	flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
	crypto_request_set_callback(&req->base, flags, compl, data);
}
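
/*
 * Most callers that need to wait synchronously do not implement a callback of
 * their own but reuse the crypto_wait helpers from <linux/crypto.h>, roughly
 * as follows (illustrative):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				   crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */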

/**
 * ahash_request_set_crypt() - set data buffers
 * @req: ahash_request handle to be updated
 * @src: source scatter/gather list
 * @result: buffer that is filled with the message digest -- the caller must
 *	    ensure that the buffer has sufficient space by, for example, calling
 *	    crypto_ahash_digestsize()
 * @nbytes: number of bytes to process from the source scatter/gather list
 *
 * By using this call, the caller references the source scatter/gather list.
 * The source scatter/gather list points to the data the message digest is to
 * be calculated for.
 */
static inline void ahash_request_set_crypt(struct ahash_request *req,
					   struct scatterlist *src, u8 *result,
					   unsigned int nbytes)
{
	req->src = src;
	req->nbytes = nbytes;
	req->result = result;
	req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT;
}

/**
 * ahash_request_set_virt() - set virtual address data buffers
 * @req: ahash_request handle to be updated
 * @src: source virtual address
 * @result: buffer that is filled with the message digest -- the caller must
 *	    ensure that the buffer has sufficient space by, for example, calling
 *	    crypto_ahash_digestsize()
 * @nbytes: number of bytes to process from the source virtual address
 *
 * By using this call, the caller references the source virtual address.
 * The source virtual address points to the data the message digest is to
 * be calculated for.
 */
static inline void ahash_request_set_virt(struct ahash_request *req,
					  const u8 *src, u8 *result,
					  unsigned int nbytes)
{
	req->svirt = src;
	req->nbytes = nbytes;
	req->result = result;
	req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}
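
/*
 * Example (illustrative): hashing a linearly mapped kernel buffer without
 * building a scatterlist.  The usual lifetime and mapping rules for the
 * buffer still apply; treat this as a sketch only.
 *
 *	ahash_request_set_virt(req, buf, digest, buflen);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */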

/**
 * DOC: Synchronous Message Digest API
 *
 * The synchronous message digest API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
 *
 * The message digest API is able to maintain state information for the
 * caller.
 *
 * The synchronous message digest API can store user-related context in its
 * shash_desc request data structure.
 */
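
/*
 * Typical (illustrative) synchronous flow with a heap-allocated descriptor;
 * the SHASH_DESC_ON_STACK() helper above is the common shortcut for the
 * allocation.  "sha256", the function name and the error handling are sample
 * choices only; @out must hold crypto_shash_digestsize(tfm) bytes.
 *
 *	static int example_shash(const u8 *p1, unsigned int l1,
 *				 const u8 *p2, unsigned int l2, u8 *out)
 *	{
 *		struct crypto_shash *tfm;
 *		struct shash_desc *desc;
 *		int err;
 *
 *		tfm = crypto_alloc_shash("sha256", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
 *			       GFP_KERNEL);
 *		if (!desc) {
 *			err = -ENOMEM;
 *			goto out_free_tfm;
 *		}
 *		desc->tfm = tfm;
 *
 *		err = crypto_shash_init(desc);
 *		if (!err)
 *			err = crypto_shash_update(desc, p1, l1);
 *		if (!err)
 *			err = crypto_shash_finup(desc, p2, l2, out);
 *
 *		kfree_sensitive(desc);
 *	out_free_tfm:
 *		crypto_free_shash(tfm);
 *		return err;
 *	}
 */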

/**
 * crypto_alloc_shash() - allocate message digest handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      message digest cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a message digest. The returned &struct
 * crypto_shash is the cipher handle that is required for any subsequent
 * API invocation for that message digest.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
					u32 mask);

struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);

int crypto_has_shash(const char *alg_name, u32 type, u32 mask);

static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_shash() - zeroize and free the message digest handle
 * @tfm: cipher handle to be freed
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
	crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
}

static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm)
{
	return crypto_tfm_alg_name(crypto_shash_tfm(tfm));
}

static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
}

/**
 * crypto_shash_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the message digest cipher referenced with the cipher
 * handle is returned.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
}

static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct shash_alg, base);
}

static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
{
	return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
}

/**
 * crypto_shash_digestsize() - obtain message digest size
 * @tfm: cipher handle
 *
 * The size for the message digest created by the message digest cipher
 * referenced with the cipher handle is returned.
 *
 * Return: digest size of cipher
 */
static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
{
	return crypto_shash_alg(tfm)->digestsize;
}

static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
{
	return crypto_shash_alg(tfm)->statesize;
}

static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
{
	return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
}

static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
{
	crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
}

static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
{
	crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
}

/**
 * crypto_shash_descsize() - obtain the operational state size
 * @tfm: cipher handle
 *
 * The size of the operational state the cipher needs during operation is
 * returned for the hash referenced with the cipher handle. This size is
 * required to calculate the memory requirements to allow the caller allocating
 * sufficient memory for operational state.
 *
 * The operational state is defined with struct shash_desc where the size of
 * that data structure is to be calculated as
 * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
 *
 * Return: size of the operational state
 */
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
	return crypto_shash_alg(tfm)->descsize;
}

static inline void *shash_desc_ctx(struct shash_desc *desc)
{
	return desc->__ctx;
}

/**
 * crypto_shash_setkey() - set key for message digest
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the keyed message digest cipher. The
 * cipher handle must point to a keyed message digest cipher in order for this
 * function to succeed.
 *
 * Context: Softirq or process context.
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
			unsigned int keylen);

/**
 * crypto_shash_digest() - calculate message digest for buffer
 * @desc: see crypto_shash_final()
 * @data: see crypto_shash_update()
 * @len: see crypto_shash_update()
 * @out: see crypto_shash_final()
 *
 * This function is a "short-hand" for the function calls of crypto_shash_init,
 * crypto_shash_update and crypto_shash_final. The parameters have the same
 * meaning as discussed for those separate three functions.
 *
 * Context: Softirq or process context.
 * Return: 0 if the message digest creation was successful; < 0 if an error
 *	   occurred
 */
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out);

/**
 * crypto_shash_tfm_digest() - calculate message digest for buffer
 * @tfm: hash transformation object
 * @data: see crypto_shash_update()
 * @len: see crypto_shash_update()
 * @out: see crypto_shash_final()
 *
 * This is a simplified version of crypto_shash_digest() for users who don't
 * want to allocate their own hash descriptor (shash_desc).  Instead,
 * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash)
 * directly, and it allocates a hash descriptor on the stack internally.
 * Note that this stack allocation may be fairly large.
 *
 * Context: Softirq or process context.
 * Return: 0 on success; < 0 if an error occurred.
 */
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
			    unsigned int len, u8 *out);
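
/*
 * Example (illustrative): when only a single one-shot digest is needed, the
 * tfm-level helper avoids managing a descriptor at the call site.
 *
 *	err = crypto_shash_tfm_digest(tfm, data, len, digest);
 */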

int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
		       unsigned int len, u8 *out);

/**
 * crypto_shash_export() - extract operational state for message digest
 * @desc: reference to the operational state handle whose state is exported
 * @out: output buffer of sufficient size that can hold the hash state
 *
 * This function exports the hash state of the operational state handle into
 * the caller-allocated output buffer, which must have sufficient size (e.g.
 * as reported by crypto_shash_statesize()).
 *
 * Context: Softirq or process context.
 * Return: 0 if the export creation was successful; < 0 if an error occurred
 */
int crypto_shash_export(struct shash_desc *desc, void *out);

/**
 * crypto_shash_import() - import operational state
 * @desc: reference to the operational state handle the state is imported into
 * @in: buffer holding the state
 *
 * This function imports the hash state into the operational state handle from
 * the input buffer. That buffer should have been generated with the
 * crypto_shash_export() function.
 *
 * Context: Softirq or process context.
 * Return: 0 if the import was successful; < 0 if an error occurred
 */
int crypto_shash_import(struct shash_desc *desc, const void *in);
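
/*
 * Example (illustrative): duplicating an in-progress computation so one copy
 * can be finalized without disturbing the other.  Both descriptors must be
 * backed by the same algorithm; the state buffer is sized for the worst case
 * here purely for simplicity.
 *
 *	u8 state[HASH_MAX_STATESIZE];
 *
 *	err = crypto_shash_export(desc, state);
 *	if (!err)
 *		err = crypto_shash_import(desc2, state);
 *	memzero_explicit(state, sizeof(state));
 */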

/**
 * crypto_shash_init() - (re)initialize message digest
 * @desc: operational state handle that is already filled
 *
 * The call (re-)initializes the message digest referenced by the
 * operational state handle. Any potentially existing state created by
 * previous operations is discarded.
 *
 * Context: Softirq or process context.
 * Return: 0 if the message digest initialization was successful; < 0 if an
 *	   error occurred
 */
int crypto_shash_init(struct shash_desc *desc);

/**
 * crypto_shash_finup() - calculate message digest of buffer
 * @desc: see crypto_shash_final()
 * @data: see crypto_shash_update()
 * @len: see crypto_shash_update()
 * @out: see crypto_shash_final()
 *
 * This function is a "short-hand" for the function calls of
 * crypto_shash_update and crypto_shash_final. The parameters have the same
 * meaning as discussed for those separate functions.
 *
 * Context: Softirq or process context.
 * Return: 0 if the message digest creation was successful; < 0 if an error
 *	   occurred
 */
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
		       unsigned int len, u8 *out);

/**
 * crypto_shash_update() - add data to message digest for processing
 * @desc: operational state handle that is already initialized
 * @data: input data to be added to the message digest
 * @len: length of the input data
 *
 * Updates the message digest state of the operational state handle.
 *
 * Context: Softirq or process context.
 * Return: 0 if the message digest update was successful; < 0 if an error
 *	   occurred
 */
static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data,
				      unsigned int len)
{
	return crypto_shash_finup(desc, data, len, NULL);
}

/**
 * crypto_shash_final() - calculate message digest
 * @desc: operational state handle that is already filled with data
 * @out: output buffer filled with the message digest
 *
 * Finalize the message digest operation and create the message digest
 * based on all data added to the cipher handle. The message digest is placed
 * into the output buffer. The caller must ensure that the output buffer is
 * large enough by using crypto_shash_digestsize.
 *
 * Context: Softirq or process context.
 * Return: 0 if the message digest creation was successful; < 0 if an error
 *	   occurred
 */
static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
	return crypto_shash_finup(desc, NULL, 0, out);
}

static inline void shash_desc_zero(struct shash_desc *desc)
{
	memzero_explicit(desc,
			 sizeof(*desc) + crypto_shash_descsize(desc->tfm));
}

static inline bool ahash_is_async(struct crypto_ahash *tfm)
{
	return crypto_tfm_is_async(&tfm->base);
}

static inline struct ahash_request *ahash_request_on_stack_init(
	char *buf, struct crypto_ahash *tfm)
{
	struct ahash_request *req = (void *)buf;

	crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm));
	return req;
}

static inline struct ahash_request *ahash_request_clone(
	struct ahash_request *req, size_t total, gfp_t gfp)
{
	return container_of(crypto_request_clone(&req->base, total, gfp),
			    struct ahash_request, base);
}

#endif	/* _CRYPTO_HASH_H */