/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}
static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
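/*
 * A worked example of skcipher_get_spot() (illustrative numbers only,
 * assuming PAGE_SIZE == 4096): for start == 0x1ff0 and len == 32, the
 * last byte would land at 0x200f, so end_page == 0x2000 and the spot
 * is moved up to 0x2000, keeping all 32 bytes inside the second page.
 * For start == 0x1000 and len == 32, end_page == 0x1000 == start, so
 * start is returned unchanged.
 */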
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes - err;
	unsigned int nbytes;

	nbytes = walk->total - n;

	if (unlikely(err < 0)) {
		nbytes = 0;
		n = 0;
	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
					   SKCIPHER_WALK_SLOW |
					   SKCIPHER_WALK_COPY |
					   SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (WARN_ON(err)) {
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = nbytes;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
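/*
 * Sketch of the intended calling convention (illustrative only;
 * "bsize" and "process()" are hypothetical stand-ins for a real
 * cipher step):
 *
 *	struct skcipher_walk walk;
 *	int err = skcipher_walk_virt(&walk, req, false);
 *
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		process(walk.dst.virt.addr, walk.src.virt.addr, n, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 *
 * That is, the "err" passed to skcipher_walk_done() is the number of
 * bytes left unprocessed in this chunk (or a negative error code),
 * matching the "walk->nbytes - err" accounting above.
 */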
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->chunksize);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->chunksize >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		buffer = walk->buffer ?: walk->page;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}
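/*
 * The "(sizeof(*p) ^ (sizeof(*p) - 1)) >> 1" expression above computes
 * one less than the largest power of two dividing sizeof(*p), i.e. the
 * alignmask that p->buffer naturally has after the header.  A worked
 * example with a hypothetical sizeof(*p) == 40 (0b101000):
 *
 *	40 ^ 39 = 0b101000 ^ 0b100111 = 0b001111
 *	0b001111 >> 1 = 0b000111 = 7
 *
 * so p->buffer is known to be 8-byte aligned, and "a" is reduced
 * accordingly before the alignmask and page-straddle slack are added.
 */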
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->chunksize, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_next);
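/*
 * Illustrative sizing for skcipher_walk_next() above (hypothetical
 * numbers): with blocksize == 16, chunksize == 16 and walk->total ==
 * 100, bsize == min(16, max(100, 16)) == 16.  If the scatterlists then
 * clamp n to, say, 8 bytes before a segment or page boundary, n < bsize
 * triggers the slow path, which assembles one contiguous 16-byte block
 * from the two segments.  Only when walk->total itself is below one
 * block is -EINVAL returned.
 */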
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->chunksize;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	walk->nbytes = 0;

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;
	walk->nbytes = walk->total;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->total = req->cryptlen;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->chunksize = crypto_skcipher_chunksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->chunksize = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}
int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
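/*
 * Illustrative request layout for the AEAD entry points above
 * (hypothetical sizes): with assoclen == 16, cryptlen == 64 and
 * authsize == 16, both walks first skip the 16 associated-data bytes
 * via scatterwalk_copychunks(NULL, ..., 2).  The encrypt walk then
 * covers all 64 payload bytes, while the decrypt walk covers only
 * 64 - 16 == 48 bytes, leaving the trailing authentication tag for
 * the AEAD implementation to verify separately.
 */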
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	return 0;
}
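/*
 * A minimal sketch of the dispatch chain set up above: the skcipher
 * context stores nothing but a pointer to the legacy tfm (see
 * crypto_skcipher_extsize()), and since the legacy blkcipher runs
 * synchronously through blkcipher_desc, no per-request state beyond
 * the request itself appears to be needed:
 *
 *	crypto_skcipher_encrypt(req)
 *	  -> skcipher->encrypt == skcipher_encrypt_blkcipher
 *	    -> skcipher_crypt_blkcipher(req, alg->encrypt)
 *	      -> alg->encrypt(&desc, req->dst, req->src, req->cryptlen)
 */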
static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = alg->setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
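/*
 * Minimal usage sketch for the allocation API (illustrative only;
 * error handling trimmed, and "cbc(aes)" is just an example name):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_skcipher_encrypt(req);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */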
int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
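/*
 * Sketch of what a driver registers through the API above (illustrative
 * values; "example_setkey" etc. are hypothetical callbacks):
 *
 *	static struct skcipher_alg example_alg = {
 *		.base = {
 *			.cra_name	 = "cbc(example)",
 *			.cra_driver_name = "cbc-example-driver",
 *			.cra_priority	 = 100,
 *			.cra_blocksize	 = 16,
 *			.cra_ctxsize	 = sizeof(struct example_ctx),
 *			.cra_module	 = THIS_MODULE,
 *		},
 *		.min_keysize	= 16,
 *		.max_keysize	= 32,
 *		.ivsize		= 16,
 *		.setkey		= example_setkey,
 *		.encrypt	= example_encrypt,
 *		.decrypt	= example_decrypt,
 *	};
 *
 * The module init function then calls
 * crypto_register_skcipher(&example_alg); skcipher_prepare_alg() fills
 * in chunksize (defaulting to the block size) and forces cra_flags and
 * cra_type to the skcipher type.
 */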
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");