/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * AEAD: Authenticated Encryption with Associated Data
 *
 * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_INTERNAL_AEAD_H
#define _CRYPTO_INTERNAL_AEAD_H

#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct rtattr;

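/*
 * An AEAD instance is an AEAD algorithm that a template has built around
 * another algorithm.  The union below overlays the generic instance
 * header on the embedded aead_alg: @s.head pads out to
 * offsetof(struct aead_alg, base), so @s.base starts exactly where
 * @alg.base does.  aead_crypto_instance() and aead_instance() below
 * convert between the two views of the same object.
 */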
struct aead_instance {
	void (*free)(struct aead_instance *inst);
	union {
		struct {
			char head[offsetof(struct aead_alg, base)];
			struct crypto_instance base;
		} s;
		struct aead_alg alg;
	};
};

struct crypto_aead_spawn {
	struct crypto_spawn base;
};

struct aead_queue {
	struct crypto_queue base;
};

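/* Per-transform context, sized by the algorithm's base.cra_ctxsize. */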
static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline struct crypto_instance *aead_crypto_instance(
	struct aead_instance *inst)
{
	return container_of(&inst->alg.base, struct crypto_instance, alg);
}

static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
{
	return container_of(&inst->alg, struct aead_instance, alg.base);
}

static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
{
	return aead_instance(crypto_tfm_alg_instance(&aead->base));
}

static inline void *aead_instance_ctx(struct aead_instance *inst)
{
	return crypto_instance_ctx(aead_crypto_instance(inst));
}

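/*
 * Private per-request context.  Its size is set by the implementation
 * via crypto_aead_set_reqsize(), e.g. to make room for a subrequest
 * to an inner transform.
 */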
static inline void *aead_request_ctx(struct aead_request *req)
{
	return req->__ctx;
}

static inline void aead_request_complete(struct aead_request *req, int err)
{
	req->base.complete(&req->base, err);
}

static inline u32 aead_request_flags(struct aead_request *req)
{
	return req->base.flags;
}

static inline struct aead_request *aead_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct aead_request, base);
}

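/*
 * Bind the AEAD algorithm looked up by @name to @spawn, which sits in
 * the instance context of @inst.  Undone by crypto_drop_aead(); a
 * successfully grabbed spawn is turned into a working transform with
 * crypto_spawn_aead(), typically from the instance's ->init() callback.
 */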
int crypto_grab_aead(struct crypto_aead_spawn *spawn,
		     struct crypto_instance *inst,
		     const char *name, u32 type, u32 mask);

static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}

static inline struct aead_alg *crypto_spawn_aead_alg(
	struct crypto_aead_spawn *spawn)
{
	return container_of(spawn->base.alg, struct aead_alg, base);
}

static inline struct crypto_aead *crypto_spawn_aead(
	struct crypto_aead_spawn *spawn)
{
	return crypto_spawn_tfm2(&spawn->base);
}

static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
					   unsigned int reqsize)
{
	aead->reqsize = reqsize;
}
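
/*
 * A minimal sketch of the usual pairing of crypto_spawn_aead() and
 * crypto_aead_set_reqsize() in an instance's tfm init callback; the
 * my_-prefixed context layouts are illustrative, not part of this API:
 *
 *	struct my_inst_ctx { struct crypto_aead_spawn spawn; };
 *	struct my_tfm_ctx { struct crypto_aead *child; };
 *
 *	static int my_aead_init(struct crypto_aead *tfm)
 *	{
 *		struct my_inst_ctx *ictx = aead_instance_ctx(aead_alg_instance(tfm));
 *		struct my_tfm_ctx *ctx = crypto_aead_ctx(tfm);
 *		struct crypto_aead *child = crypto_spawn_aead(&ictx->spawn);
 *
 *		if (IS_ERR(child))
 *			return PTR_ERR(child);
 *		ctx->child = child;
 *		crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
 *					     crypto_aead_reqsize(child));
 *		return 0;
 *	}
 */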

static inline void aead_init_queue(struct aead_queue *queue,
				   unsigned int max_qlen)
{
	crypto_init_queue(&queue->base, max_qlen);
}

static inline int aead_enqueue_request(struct aead_queue *queue,
				       struct aead_request *request)
{
	return crypto_enqueue_request(&queue->base, &request->base);
}

static inline struct aead_request *aead_dequeue_request(
	struct aead_queue *queue)
{
	struct crypto_async_request *req;

	req = crypto_dequeue_request(&queue->base);

	return req ? container_of(req, struct aead_request, base) : NULL;
}

static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
{
	struct crypto_async_request *req;

	req = crypto_get_backlog(&queue->base);

	return req ? container_of(req, struct aead_request, base) : NULL;
}
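
/*
 * The queue helpers above follow the standard crypto_queue discipline.
 * A hedged sketch of a driver draining one request, where my_process()
 * stands in for the actual hardware submission: a request that was
 * accepted as backlog (enqueue returned -EBUSY) must be notified with
 * -EINPROGRESS once it moves to the head of the queue.
 *
 *	static void my_dequeue_one(struct aead_queue *queue)
 *	{
 *		struct aead_request *backlog = aead_get_backlog(queue);
 *		struct aead_request *req = aead_dequeue_request(queue);
 *
 *		if (backlog)
 *			aead_request_complete(backlog, -EINPROGRESS);
 *		if (req)
 *			my_process(req);
 *	}
 */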

static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
{
	return alg->chunksize;
}

/**
 * crypto_aead_chunksize() - obtain chunk size
 * @tfm: cipher handle
 *
 * The block size is set to one for ciphers such as CCM.  However,
 * you still need to provide incremental updates in multiples of
 * the underlying block size as the IV does not have sub-block
 * granularity.  This is known in this API as the chunk size.
 *
 * Return: chunk size in bytes
 */
static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm)
{
	return crypto_aead_alg_chunksize(crypto_aead_alg(tfm));
}

int crypto_register_aead(struct aead_alg *alg);
void crypto_unregister_aead(struct aead_alg *alg);
int crypto_register_aeads(struct aead_alg *algs, int count);
void crypto_unregister_aeads(struct aead_alg *algs, int count);
int aead_register_instance(struct crypto_template *tmpl,
			   struct aead_instance *inst);

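/*
 * A hedged end-to-end sketch of a template ->create() callback using the
 * helpers declared above.  Attribute/type checking, algorithm-name setup
 * and the instance ->free callback are omitted, and the my_ names are
 * illustrative only:
 *
 *	static int my_create(struct crypto_template *tmpl, struct rtattr **tb)
 *	{
 *		struct crypto_aead_spawn *spawn;
 *		struct aead_instance *inst;
 *		struct aead_alg *alg;
 *		int err;
 *
 *		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
 *		if (!inst)
 *			return -ENOMEM;
 *		spawn = aead_instance_ctx(inst);
 *
 *		err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
 *				       crypto_attr_alg_name(tb[1]), 0, 0);
 *		if (err)
 *			goto err_free_inst;
 *
 *		alg = crypto_spawn_aead_alg(spawn);
 *		inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
 *		inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 *
 *		err = aead_register_instance(tmpl, inst);
 *		if (err)
 *			goto err_drop_spawn;
 *		return 0;
 *
 *	err_drop_spawn:
 *		crypto_drop_aead(spawn);
 *	err_free_inst:
 *		kfree(inst);
 *		return err;
 *	}
 */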
#endif	/* _CRYPTO_INTERNAL_AEAD_H */