1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 #ifndef _KERNEL
28 #include <strings.h>
29 #include <limits.h>
30 #include <assert.h>
31 #include <security/cryptoki.h>
32 #endif
33
34 #include <sys/debug.h>
35 #include <sys/types.h>
36 #include <modes/modes.h>
37 #include <sys/crypto/common.h>
38 #include <sys/crypto/impl.h>
39 #include <aes/aes_impl.h>
40
/*
 * These are the CMAC Rb constants from NIST SP 800-38B: the low-order
 * byte of the reduction polynomial for 128-bit and 64-bit block ciphers,
 * XORed into a subkey when a left shift carries a bit off the end.
 */
#define	CONST_RB_128	0x87
#define	CONST_RB_64	0x1B
44
45 /*
46 * Algorithm independent CBC functions.
47 */
/*
 * Encrypt (or, for CMAC, absorb) `length' bytes at `data' in CBC fashion.
 *
 * Partial blocks are buffered in ctx->cbc_remainder between calls; data
 * is only pushed through `encrypt' once ctx->max_remain bytes are
 * available (block_size for CBC, block_size + 1 for CMAC, so that the
 * final block can be withheld until cmac_mode_final() -- see
 * cmac_init_ctx()).
 *
 * If `out' is NULL the data is encrypted in place; otherwise ciphertext
 * is written to `out' block by block, except in CMAC mode, which emits
 * nothing until the final MAC is produced.
 *
 * Returns CRYPTO_SUCCESS, or CRYPTO_DATA_LEN_RANGE on an internal
 * length inconsistency.
 */
int
cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;

	/* Not enough buffered + new bytes to process a block yet. */
	if (length + ctx->cbc_remainder_len < ctx->max_remain) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
		    length);
		ctx->cbc_remainder_len += length;
		ctx->cbc_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	/* Initial chaining value: the IV (or last block of a prior call). */
	lastp = (uint8_t *)ctx->cbc_iv;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->cbc_remainder_len > 0) {
			/* Top up the buffered partial block from datap. */
			need = block_size - ctx->cbc_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
			    [ctx->cbc_remainder_len], need);

			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			blockp = datap;
		}

		if (out == NULL) {
			/*
			 * XOR the previous cipher block or IV with the
			 * current clear block.
			 */
			xor_block(lastp, blockp);
			encrypt(ctx->cbc_keysched, blockp, blockp);

			ctx->cbc_lastp = blockp;
			lastp = blockp;

			/*
			 * If this block was assembled in the remainder
			 * buffer, scatter the in-place ciphertext back to
			 * where the caller's plaintext bytes came from.
			 */
			if ((ctx->cbc_flags & CMAC_MODE) == 0 &&
			    ctx->cbc_remainder_len > 0) {
				bcopy(blockp, ctx->cbc_copy_to,
				    ctx->cbc_remainder_len);
				bcopy(blockp + ctx->cbc_remainder_len, datap,
				    need);
			}
		} else {
			/*
			 * XOR the previous cipher block or IV with the
			 * current clear block.
			 */
			xor_block(blockp, lastp);
			encrypt(ctx->cbc_keysched, lastp, lastp);

			/*
			 * CMAC doesn't output until encrypt_final
			 */
			if ((ctx->cbc_flags & CMAC_MODE) == 0) {
				crypto_get_ptrs(out, &iov_or_mp, &offset,
				    &out_data_1, &out_data_1_len,
				    &out_data_2, block_size);

				/* copy block to where it belongs */
				if (out_data_1_len == block_size) {
					copy_block(lastp, out_data_1);
				} else {
					/*
					 * Ciphertext block straddles two
					 * output buffers.
					 */
					bcopy(lastp, out_data_1,
					    out_data_1_len);
					if (out_data_2 != NULL) {
						bcopy(lastp + out_data_1_len,
						    out_data_2,
						    block_size -
						    out_data_1_len);
					}
				}
				/* update offset */
				out->cd_offset += block_size;
			}
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->cbc_remainder_len != 0) {
			datap += need;
			ctx->cbc_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < ctx->max_remain) {
			bcopy(datap, ctx->cbc_remainder, remainder);
			ctx->cbc_remainder_len = remainder;
			ctx->cbc_copy_to = datap;
			goto out;
		}
		ctx->cbc_copy_to = NULL;

	} while (remainder > 0);

out:
	/*
	 * Save the last encrypted block in the context.
	 */
	if (ctx->cbc_lastp != NULL) {
		copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv);
		ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv;
	}

	return (CRYPTO_SUCCESS);
}
180
/*
 * The decrypt path ping-pongs the previous-ciphertext pointer between the
 * context's two block buffers (cbc_lastblock and cbc_iv).  OTHER(a, ctx)
 * yields whichever of the two buffers `a' is NOT currently pointing at.
 */
#define	OTHER(a, ctx) \
	(((a) == (ctx)->cbc_lastblock) ? (ctx)->cbc_iv : (ctx)->cbc_lastblock)
183
/*
 * Decrypt `length' bytes of ciphertext at `data' in CBC fashion.
 *
 * Partial blocks are buffered in ctx->cbc_remainder between calls.  If
 * `out' is NULL the data is decrypted in place; otherwise plaintext is
 * written to `out' block by block.
 *
 * Returns CRYPTO_SUCCESS, or CRYPTO_ENCRYPTED_DATA_LEN_RANGE on an
 * internal length inconsistency.
 */
/* ARGSUSED */
int
cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*decrypt)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;

	/* Not enough buffered + new bytes for a full block yet. */
	if (length + ctx->cbc_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
		    length);
		ctx->cbc_remainder_len += length;
		ctx->cbc_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = ctx->cbc_lastp;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->cbc_remainder_len > 0) {
			/* Top up the buffered partial block from datap. */
			need = block_size - ctx->cbc_remainder_len;

			if (need > remainder)
				return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
			    [ctx->cbc_remainder_len], need);

			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Save this ciphertext block before it is (possibly)
		 * decrypted in place -- it is the chaining value for the
		 * next block.
		 */
		/* LINTED: pointer alignment */
		copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx));

		if (out != NULL) {
			/* Decrypt into scratch space, then copy to `out'. */
			decrypt(ctx->cbc_keysched, blockp,
			    (uint8_t *)ctx->cbc_remainder);
			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			decrypt(ctx->cbc_keysched, blockp, blockp);
		}

		/*
		 * XOR the previous cipher block or IV with the
		 * currently decrypted block.
		 */
		xor_block(lastp, blockp);

		/* Advance chaining value to the saved ciphertext block. */
		/* LINTED: pointer alignment */
		lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx);

		if (out != NULL) {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* Plaintext may straddle two output buffers. */
			bcopy(blockp, out_data_1, out_data_1_len);
			if (out_data_2 != NULL) {
				bcopy(blockp + out_data_1_len, out_data_2,
				    block_size - out_data_1_len);
			}

			/* update offset */
			out->cd_offset += block_size;

		} else if (ctx->cbc_remainder_len > 0) {
			/* copy temporary block to where it belongs */
			bcopy(blockp, ctx->cbc_copy_to, ctx->cbc_remainder_len);
			bcopy(blockp + ctx->cbc_remainder_len, datap, need);
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->cbc_remainder_len != 0) {
			datap += need;
			ctx->cbc_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->cbc_remainder, remainder);
			ctx->cbc_remainder_len = remainder;
			ctx->cbc_lastp = lastp;
			ctx->cbc_copy_to = datap;
			return (CRYPTO_SUCCESS);
		}
		ctx->cbc_copy_to = NULL;

	} while (remainder > 0);

	ctx->cbc_lastp = lastp;
	return (CRYPTO_SUCCESS);
}
297
/*
 * Initialize a CBC context: copy in the IV (of exactly block_size bytes,
 * when `param' is non-NULL) and arm the context for plain CBC operation.
 * Always returns CRYPTO_SUCCESS.
 */
int
cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len,
    size_t block_size, void (*copy_block)(uint8_t *, uint64_t *))
{
	/*
	 * Copy IV into context.
	 *
	 * If cm_param == NULL then the IV comes from the
	 * cd_miscdata field in the crypto_data structure.
	 */
	if (param != NULL) {
#ifdef _KERNEL
		ASSERT(param_len == block_size);
#else
		assert(param_len == block_size);
#endif
		copy_block((uchar_t *)param, cbc_ctx->cbc_iv);
	}

	/* Chaining starts from the IV. */
	cbc_ctx->cbc_lastp = (uint8_t *)&cbc_ctx->cbc_iv[0];
	cbc_ctx->cbc_flags |= CBC_MODE;
	/* Plain CBC processes as soon as a full block accumulates. */
	cbc_ctx->max_remain = block_size;
	return (CRYPTO_SUCCESS);
}
322
/*
 * Allocate a zeroed context for CBC or CMAC (CMAC reuses the CBC
 * machinery).  `kmflag' is the kernel allocation flag; it is unused in
 * userland, where calloc() is used instead.  Returns NULL on allocation
 * failure.
 */
/* ARGSUSED */
static void *
cbc_cmac_alloc_ctx(int kmflag, uint32_t mode)
{
	cbc_ctx_t *cbc_ctx;
	uint32_t modeval = mode & (CBC_MODE|CMAC_MODE);

	/* Only one of the two modes can be set */
	VERIFY(modeval == CBC_MODE || modeval == CMAC_MODE);

#ifdef _KERNEL
	if ((cbc_ctx = kmem_zalloc(sizeof (cbc_ctx_t), kmflag)) == NULL)
#else
	if ((cbc_ctx = calloc(1, sizeof (cbc_ctx_t))) == NULL)
#endif
		return (NULL);

	cbc_ctx->cbc_flags = mode;
	return (cbc_ctx);
}
343
/*
 * Allocate a zeroed CBC-mode context; NULL on allocation failure.
 */
void *
cbc_alloc_ctx(int kmflag)
{
	return (cbc_cmac_alloc_ctx(kmflag, CBC_MODE));
}
349
350 /*
351 * Algorithms for supporting AES-CMAC
352 * NOTE: CMAC is generally just a wrapper for CBC
353 */
354
/*
 * Allocate a zeroed CMAC-mode context; NULL on allocation failure.
 */
void *
cmac_alloc_ctx(int kmflag)
{
	return (cbc_cmac_alloc_ctx(kmflag, CMAC_MODE));
}
360
361
362 /*
363 * Typically max_remain is set to block_size - 1, since we usually
364 * will process the data once we have a full block. However with CMAC,
365 * we must preprocess the final block of data. Since we cannot know
366 * when we've received the final block of data until the _final() method
367 * is called, we must not process the last block of data until we know
368 * it is the last block, or we receive a new block of data. As such,
369 * max_remain for CMAC is block_size + 1.
370 */
371 int
cmac_init_ctx(cbc_ctx_t * cbc_ctx,size_t block_size)372 cmac_init_ctx(cbc_ctx_t *cbc_ctx, size_t block_size)
373 {
374 /*
375 * CMAC is only approved for block sizes 64 and 128 bits /
376 * 8 and 16 bytes.
377 */
378
379 if (block_size != 16 && block_size != 8)
380 return (CRYPTO_INVALID_CONTEXT);
381
382 /*
383 * For CMAC, cbc_iv is always 0.
384 */
385
386 cbc_ctx->cbc_iv[0] = 0;
387 cbc_ctx->cbc_iv[1] = 0;
388
389 cbc_ctx->cbc_lastp = (uint8_t *)&cbc_ctx->cbc_iv[0];
390 cbc_ctx->cbc_flags |= CMAC_MODE;
391
392 cbc_ctx->max_remain = block_size + 1;
393 return (CRYPTO_SUCCESS);
394 }
395
/*
 * Shift the block_size-byte big-endian value at `block' left by one bit,
 * in place, and return the bit that was shifted out of the leftmost byte.
 */
static uint8_t
cmac_left_shift_block_by1(uint8_t *block, size_t block_size)
{
	uint8_t carry_in = 0;
	size_t i = block_size;

	/* Walk from the least-significant byte up, propagating the carry. */
	while (i-- > 0) {
		uint8_t carry_out = (block[i] >> 7) & 1;

		block[i] = (uint8_t)((block[i] << 1) | carry_in);
		carry_in = carry_out;
	}
	return (carry_in);
}
411
/*
 * Generate subkeys to preprocess the last block according to RFC 4493.
 * Store the final block_size MAC generated in 'out'.
 *
 * The buffered final block (cbc_remainder) is XORed with subkey K1 if it
 * is complete, or padded (0x80 then zeros) and XORed with K2 if it is
 * not, then chained and encrypted one last time to yield the MAC.
 *
 * Returns CRYPTO_INVALID_CONTEXT on an inconsistent context,
 * CRYPTO_DATA_LEN_RANGE if `out' cannot hold a full block, otherwise the
 * result of crypto_put_output_data().
 */
int
cmac_mode_final(cbc_ctx_t *cbc_ctx, crypto_data_t *out,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t buf[AES_BLOCK_LEN] = {0};
	uint8_t *M_last = (uint8_t *)cbc_ctx->cbc_remainder;
	size_t length = cbc_ctx->cbc_remainder_len;
	/* cmac_init_ctx() set max_remain to block_size + 1. */
	size_t block_size = cbc_ctx->max_remain - 1;
	uint8_t const_rb;

	/* More than one block buffered: the update path misbehaved. */
	if (length > block_size)
		return (CRYPTO_INVALID_CONTEXT);

	/* The caller's buffer must hold a full-block MAC. */
	if (out->cd_length < block_size)
		return (CRYPTO_DATA_LEN_RANGE);

	/* The Rb constant depends on the cipher width (NIST SP 800-38B). */
	if (block_size == 16)
		const_rb = CONST_RB_128;
	else if (block_size == 8)
		const_rb = CONST_RB_64;
	else
		return (CRYPTO_INVALID_CONTEXT);

	/* k_0 = E_k(0) */
	encrypt_block(cbc_ctx->cbc_keysched, buf, buf);

	/* k_1 = k_0 << 1, reduced by Rb if a bit fell off the end. */
	if (cmac_left_shift_block_by1(buf, block_size))
		buf[block_size - 1] ^= const_rb;

	if (length == block_size) {
		/* Last block complete, so m_n = k_1 + m_n' */
		xor_block(buf, M_last);
		/* Fold in the previous chaining value and encrypt. */
		xor_block(cbc_ctx->cbc_lastp, M_last);
		encrypt_block(cbc_ctx->cbc_keysched, M_last, M_last);
	} else {
		/* Last block incomplete, so m_n = k_2 + (m_n' | 100...0_bin) */
		/* k_2 = k_1 << 1, with the same conditional reduction. */
		if (cmac_left_shift_block_by1(buf, block_size))
			buf[block_size - 1] ^= const_rb;

		/* Pad: 0x80 marker, then zeros to the block boundary. */
		M_last[length] = 0x80;
		bzero(M_last + length + 1, block_size - length - 1);
		xor_block(buf, M_last);
		xor_block(cbc_ctx->cbc_lastp, M_last);
		encrypt_block(cbc_ctx->cbc_keysched, M_last, M_last);
	}

	/*
	 * zero out the sub-key.
	 */
#ifndef _KERNEL
	explicit_bzero(&buf, sizeof (buf));
#else
	bzero(&buf, sizeof (buf));
#endif
	return (crypto_put_output_data(M_last, out, block_size));
}
473