// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
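/*
 * For example, with 4 KiB pages, start == 0xff8 and len == 16 gives
 * end_page == 0x1000, so the returned spot is pushed up to the start
 * of the next page rather than straddling the boundary.
 */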
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

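/*
 * A non-negative err is the number of bytes the caller left unprocessed
 * in this step (usually 0); a negative err aborts the walk.  Mappings
 * and bounce buffers for the current chunk are torn down, the scatter
 * walks are advanced, and the next chunk is set up if bytes remain.
 */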
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

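/*
 * For asynchronous (physical address) walks, writebacks queued by the
 * slow and copy paths are only flushed here, once the caller knows the
 * final status of the operation.
 */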
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);
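	/*
	 * As a worked example, assuming 8-byte kmalloc alignment (so
	 * a == 7): bsize == 16 and alignmask == 15 gives n == 24, i.e.
	 * 16 bytes of data plus 8 bytes of slack to reach 16-byte
	 * alignment.
	 */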

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

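/*
 * Fast path: the chunk is processed in place in the caller's own
 * buffers.  The source is always mapped; the destination is mapped
 * separately only when it does not alias the source
 * (SKCIPHER_WALK_DIFF).
 */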
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

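/*
 * Bounce an insufficiently aligned IV into an aligned buffer that does
 * not straddle a page.  walk->oiv keeps the caller's pointer so that
 * the final IV can be copied back when the walk ends.
 */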
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
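
/*
 * A typical caller drives skcipher_walk_virt() roughly like this (an
 * illustrative sketch only; the per-chunk processing and any leftover
 * handling are algorithm specific):
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes;
 *
 *		... process n bytes from walk.src.virt.addr into
 *		    walk.dst.virt.addr, using walk.iv as needed ...
 *
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 */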

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
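
/*
 * Note that for AEAD decryption the walk covers only the ciphertext
 * proper; the crypto_aead_authsize() bytes of authentication tag are
 * excluded from walk->total.
 */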

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
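
/*
 * crypto_skcipher_encrypt() and crypto_skcipher_decrypt() operate on a
 * struct skcipher_request.  A minimal synchronous-style sketch (the
 * scatterlists, key setup and async completion handling are the
 * caller's responsibility):
 *
 *	struct skcipher_request *req;
 *
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 */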

static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
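
/*
 * Illustrative use of crypto_alloc_skcipher() (the algorithm name and
 * error handling are the caller's choice):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_skcipher(tfm);
 */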

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
	const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
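
/*
 * Sync tfms are meant to be paired with an on-stack request, which is
 * why the request size is bounded above.  Sketch:
 *
 *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sync_tfm);
 *
 *	skcipher_request_set_sync_tfm(req, sync_tfm);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 */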

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
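
/*
 * Drivers typically register an array of algorithms from module init,
 * e.g. (sketch; my_algs and my_module_init are stand-in names):
 *
 *	static struct skcipher_alg my_algs[] = { ... };
 *
 *	static int __init my_module_init(void)
 *	{
 *		return crypto_register_skciphers(my_algs,
 *						 ARRAY_SIZE(my_algs));
 *	}
 */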

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
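
/*
 * A mode template's ->create() typically wraps this helper, e.g.
 * (sketch; my_encrypt/my_decrypt are stand-ins for the mode's own
 * handlers):
 *
 *	inst = skcipher_alloc_instance_simple(tmpl, tb);
 *	if (IS_ERR(inst))
 *		return PTR_ERR(inst);
 *	inst->alg.encrypt = my_encrypt;
 *	inst->alg.decrypt = my_decrypt;
 *	err = skcipher_register_instance(tmpl, inst);
 *	if (err)
 *		inst->free(inst);
 *	return err;
 */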

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);