1 /* LRW: as defined by Cyril Guyot in 2 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf 3 * 4 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org> 5 * 6 * Based on ecb.c 7 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the Free 11 * Software Foundation; either version 2 of the License, or (at your option) 12 * any later version. 13 */ 14 /* This implementation is checked against the test vectors in the above 15 * document and by a test vector provided by Ken Buchanan at 16 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html 17 * 18 * The test vectors are included in the testing module tcrypt.[ch] */ 19 20 #include <crypto/internal/skcipher.h> 21 #include <crypto/scatterwalk.h> 22 #include <linux/err.h> 23 #include <linux/init.h> 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/scatterlist.h> 27 #include <linux/slab.h> 28 29 #include <crypto/b128ops.h> 30 #include <crypto/gf128mul.h> 31 32 #define LRW_BLOCK_SIZE 16 33 34 struct priv { 35 struct crypto_skcipher *child; 36 37 /* 38 * optimizes multiplying a random (non incrementing, as at the 39 * start of a new sector) value with key2, we could also have 40 * used 4k optimization tables or no optimization at all. In the 41 * latter case we would have to store key2 here 42 */ 43 struct gf128mul_64k *table; 44 45 /* 46 * stores: 47 * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 }, 48 * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 } 49 * key2*{ 0,0,...1,1,1,1,1 }, etc 50 * needed for optimized multiplication of incrementing values 51 * with key2 52 */ 53 be128 mulinc[128]; 54 }; 55 56 struct rctx { 57 be128 t; 58 struct skcipher_request subreq; 59 }; 60 61 static inline void setbit128_bbe(void *b, int bit) 62 { 63 __set_bit(bit ^ (0x80 - 64 #ifdef __BIG_ENDIAN 65 BITS_PER_LONG 66 #else 67 BITS_PER_BYTE 68 #endif 69 ), b); 70 } 71 72 static int setkey(struct crypto_skcipher *parent, const u8 *key, 73 unsigned int keylen) 74 { 75 struct priv *ctx = crypto_skcipher_ctx(parent); 76 struct crypto_skcipher *child = ctx->child; 77 int err, bsize = LRW_BLOCK_SIZE; 78 const u8 *tweak = key + keylen - bsize; 79 be128 tmp = { 0 }; 80 int i; 81 82 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 83 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & 84 CRYPTO_TFM_REQ_MASK); 85 err = crypto_skcipher_setkey(child, key, keylen - bsize); 86 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & 87 CRYPTO_TFM_RES_MASK); 88 if (err) 89 return err; 90 91 if (ctx->table) 92 gf128mul_free_64k(ctx->table); 93 94 /* initialize multiplication table for Key2 */ 95 ctx->table = gf128mul_init_64k_bbe((be128 *)tweak); 96 if (!ctx->table) 97 return -ENOMEM; 98 99 /* initialize optimization table */ 100 for (i = 0; i < 128; i++) { 101 setbit128_bbe(&tmp, i); 102 ctx->mulinc[i] = tmp; 103 gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); 104 } 105 106 return 0; 107 } 108 109 /* 110 * Returns the number of trailing '1' bits in the words of the counter, which is 111 * represented by 4 32-bit words, arranged from least to most significant. 112 * At the same time, increments the counter by one. 113 * 114 * For example: 115 * 116 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 }; 117 * int i = next_index(&counter); 118 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } 119 */ 120 static int next_index(u32 *counter) 121 { 122 int i, res = 0; 123 124 for (i = 0; i < 4; i++) { 125 if (counter[i] + 1 != 0) { 126 res += ffz(counter[i]++); 127 break; 128 } 129 counter[i] = 0; 130 res += 32; 131 } 132 133 /* 134 * If we get here, then x == 128 and we are incrementing the counter 135 * from all ones to all zeros. This means we must return index 127, i.e. 136 * the one corresponding to key2*{ 1,...,1 }. 137 */ 138 return 127; 139 } 140 141 /* 142 * We compute the tweak masks twice (both before and after the ECB encryption or 143 * decryption) to avoid having to allocate a temporary buffer and/or make 144 * mutliple calls to the 'ecb(..)' instance, which usually would be slower than 145 * just doing the next_index() calls again. 146 */ 147 static int xor_tweak(struct skcipher_request *req, bool second_pass) 148 { 149 const int bs = LRW_BLOCK_SIZE; 150 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 151 struct priv *ctx = crypto_skcipher_ctx(tfm); 152 struct rctx *rctx = skcipher_request_ctx(req); 153 be128 t = rctx->t; 154 struct skcipher_walk w; 155 __be32 *iv; 156 u32 counter[4]; 157 int err; 158 159 if (second_pass) { 160 req = &rctx->subreq; 161 /* set to our TFM to enforce correct alignment: */ 162 skcipher_request_set_tfm(req, tfm); 163 } 164 165 err = skcipher_walk_virt(&w, req, false); 166 iv = (__be32 *)w.iv; 167 168 counter[0] = be32_to_cpu(iv[3]); 169 counter[1] = be32_to_cpu(iv[2]); 170 counter[2] = be32_to_cpu(iv[1]); 171 counter[3] = be32_to_cpu(iv[0]); 172 173 while (w.nbytes) { 174 unsigned int avail = w.nbytes; 175 be128 *wsrc; 176 be128 *wdst; 177 178 wsrc = w.src.virt.addr; 179 wdst = w.dst.virt.addr; 180 181 do { 182 be128_xor(wdst++, &t, wsrc++); 183 184 /* T <- I*Key2, using the optimization 185 * discussed in the specification */ 186 be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]); 187 } while ((avail -= bs) >= bs); 188 189 if (second_pass && w.nbytes == w.total) { 190 iv[0] = cpu_to_be32(counter[3]); 191 iv[1] = cpu_to_be32(counter[2]); 192 iv[2] = cpu_to_be32(counter[1]); 193 iv[3] = cpu_to_be32(counter[0]); 194 } 195 196 err = skcipher_walk_done(&w, avail); 197 } 198 199 return err; 200 } 201 202 static int xor_tweak_pre(struct skcipher_request *req) 203 { 204 return xor_tweak(req, false); 205 } 206 207 static int xor_tweak_post(struct skcipher_request *req) 208 { 209 return xor_tweak(req, true); 210 } 211 212 static void crypt_done(struct crypto_async_request *areq, int err) 213 { 214 struct skcipher_request *req = areq->data; 215 216 if (!err) 217 err = xor_tweak_post(req); 218 219 skcipher_request_complete(req, err); 220 } 221 222 static void init_crypt(struct skcipher_request *req) 223 { 224 struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); 225 struct rctx *rctx = skcipher_request_ctx(req); 226 struct skcipher_request *subreq = &rctx->subreq; 227 228 skcipher_request_set_tfm(subreq, ctx->child); 229 skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req); 230 /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */ 231 skcipher_request_set_crypt(subreq, req->dst, req->dst, 232 req->cryptlen, req->iv); 233 234 /* calculate first value of T */ 235 memcpy(&rctx->t, req->iv, sizeof(rctx->t)); 236 237 /* T <- I*Key2 */ 238 gf128mul_64k_bbe(&rctx->t, ctx->table); 239 } 240 241 static int encrypt(struct skcipher_request *req) 242 { 243 struct rctx *rctx = skcipher_request_ctx(req); 244 struct skcipher_request *subreq = &rctx->subreq; 245 246 init_crypt(req); 247 return xor_tweak_pre(req) ?: 248 crypto_skcipher_encrypt(subreq) ?: 249 xor_tweak_post(req); 250 } 251 252 static int decrypt(struct skcipher_request *req) 253 { 254 struct rctx *rctx = skcipher_request_ctx(req); 255 struct skcipher_request *subreq = &rctx->subreq; 256 257 init_crypt(req); 258 return xor_tweak_pre(req) ?: 259 crypto_skcipher_decrypt(subreq) ?: 260 xor_tweak_post(req); 261 } 262 263 static int init_tfm(struct crypto_skcipher *tfm) 264 { 265 struct skcipher_instance *inst = skcipher_alg_instance(tfm); 266 struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); 267 struct priv *ctx = crypto_skcipher_ctx(tfm); 268 struct crypto_skcipher *cipher; 269 270 cipher = crypto_spawn_skcipher(spawn); 271 if (IS_ERR(cipher)) 272 return PTR_ERR(cipher); 273 274 ctx->child = cipher; 275 276 crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + 277 sizeof(struct rctx)); 278 279 return 0; 280 } 281 282 static void exit_tfm(struct crypto_skcipher *tfm) 283 { 284 struct priv *ctx = crypto_skcipher_ctx(tfm); 285 286 if (ctx->table) 287 gf128mul_free_64k(ctx->table); 288 crypto_free_skcipher(ctx->child); 289 } 290 291 static void free(struct skcipher_instance *inst) 292 { 293 crypto_drop_skcipher(skcipher_instance_ctx(inst)); 294 kfree(inst); 295 } 296 297 static int create(struct crypto_template *tmpl, struct rtattr **tb) 298 { 299 struct crypto_skcipher_spawn *spawn; 300 struct skcipher_instance *inst; 301 struct crypto_attr_type *algt; 302 struct skcipher_alg *alg; 303 const char *cipher_name; 304 char ecb_name[CRYPTO_MAX_ALG_NAME]; 305 int err; 306 307 algt = crypto_get_attr_type(tb); 308 if (IS_ERR(algt)) 309 return PTR_ERR(algt); 310 311 if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) 312 return -EINVAL; 313 314 cipher_name = crypto_attr_alg_name(tb[1]); 315 if (IS_ERR(cipher_name)) 316 return PTR_ERR(cipher_name); 317 318 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); 319 if (!inst) 320 return -ENOMEM; 321 322 spawn = skcipher_instance_ctx(inst); 323 324 crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); 325 err = crypto_grab_skcipher(spawn, cipher_name, 0, 326 crypto_requires_sync(algt->type, 327 algt->mask)); 328 if (err == -ENOENT) { 329 err = -ENAMETOOLONG; 330 if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", 331 cipher_name) >= CRYPTO_MAX_ALG_NAME) 332 goto err_free_inst; 333 334 err = crypto_grab_skcipher(spawn, ecb_name, 0, 335 crypto_requires_sync(algt->type, 336 algt->mask)); 337 } 338 339 if (err) 340 goto err_free_inst; 341 342 alg = crypto_skcipher_spawn_alg(spawn); 343 344 err = -EINVAL; 345 if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) 346 goto err_drop_spawn; 347 348 if (crypto_skcipher_alg_ivsize(alg)) 349 goto err_drop_spawn; 350 351 err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", 352 &alg->base); 353 if (err) 354 goto err_drop_spawn; 355 356 err = -EINVAL; 357 cipher_name = alg->base.cra_name; 358 359 /* Alas we screwed up the naming so we have to mangle the 360 * cipher name. 361 */ 362 if (!strncmp(cipher_name, "ecb(", 4)) { 363 unsigned len; 364 365 len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); 366 if (len < 2 || len >= sizeof(ecb_name)) 367 goto err_drop_spawn; 368 369 if (ecb_name[len - 1] != ')') 370 goto err_drop_spawn; 371 372 ecb_name[len - 1] = 0; 373 374 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 375 "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { 376 err = -ENAMETOOLONG; 377 goto err_drop_spawn; 378 } 379 } else 380 goto err_drop_spawn; 381 382 inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; 383 inst->alg.base.cra_priority = alg->base.cra_priority; 384 inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; 385 inst->alg.base.cra_alignmask = alg->base.cra_alignmask | 386 (__alignof__(__be32) - 1); 387 388 inst->alg.ivsize = LRW_BLOCK_SIZE; 389 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + 390 LRW_BLOCK_SIZE; 391 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + 392 LRW_BLOCK_SIZE; 393 394 inst->alg.base.cra_ctxsize = sizeof(struct priv); 395 396 inst->alg.init = init_tfm; 397 inst->alg.exit = exit_tfm; 398 399 inst->alg.setkey = setkey; 400 inst->alg.encrypt = encrypt; 401 inst->alg.decrypt = decrypt; 402 403 inst->free = free; 404 405 err = skcipher_register_instance(tmpl, inst); 406 if (err) 407 goto err_drop_spawn; 408 409 out: 410 return err; 411 412 err_drop_spawn: 413 crypto_drop_skcipher(spawn); 414 err_free_inst: 415 kfree(inst); 416 goto out; 417 } 418 419 static struct crypto_template crypto_tmpl = { 420 .name = "lrw", 421 .create = create, 422 .module = THIS_MODULE, 423 }; 424 425 static int __init crypto_module_init(void) 426 { 427 return crypto_register_template(&crypto_tmpl); 428 } 429 430 static void __exit crypto_module_exit(void) 431 { 432 crypto_unregister_template(&crypto_tmpl); 433 } 434 435 module_init(crypto_module_init); 436 module_exit(crypto_module_exit); 437 438 MODULE_LICENSE("GPL"); 439 MODULE_DESCRIPTION("LRW block cipher mode"); 440 MODULE_ALIAS_CRYPTO("lrw"); 441