xref: /linux/net/ceph/crypto.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/krb5.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

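/*
 * Allocate a "cbc(aes)" sync skcipher for @key and program it with the
 * raw secret.  crypto_alloc_sync_skcipher() allocates with GFP_KERNEL
 * internally, hence the memalloc_noio_save()/restore() pair.
 */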
static int set_aes_tfm(struct ceph_crypto_key *key)
{
	unsigned int noio_flag;
	int ret;

	noio_flag = memalloc_noio_save();
	key->aes_tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(key->aes_tfm)) {
		ret = PTR_ERR(key->aes_tfm);
		key->aes_tfm = NULL;
		return ret;
	}

	ret = crypto_sync_skcipher_setkey(key->aes_tfm, key->key, key->len);
	if (ret)
		return ret;

	return 0;
}

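/*
 * Set up kerberos encryption for @key: look up the
 * aes256-cts-hmac-sha384-192 enctype and prepare one AEAD per requested
 * key usage, each keyed with material derived from the base key (TK).
 */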
static int set_krb5_tfms(struct ceph_crypto_key *key, const u32 *key_usages,
			 int key_usage_cnt)
{
	struct krb5_buffer TK = { .len = key->len, .data = key->key };
	unsigned int noio_flag;
	int ret = 0;
	int i;

	if (WARN_ON_ONCE(key_usage_cnt > ARRAY_SIZE(key->krb5_tfms)))
		return -EINVAL;

	key->krb5_type = crypto_krb5_find_enctype(
			     KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
	if (!key->krb5_type)
		return -ENOPKG;

	/*
	 * Despite crypto_krb5_prepare_encryption() taking a gfp mask,
	 * crypto_alloc_aead() inside of it allocates with GFP_KERNEL.
	 */
	noio_flag = memalloc_noio_save();
	for (i = 0; i < key_usage_cnt; i++) {
		key->krb5_tfms[i] = crypto_krb5_prepare_encryption(
					key->krb5_type, &TK, key_usages[i],
					GFP_NOIO);
		if (IS_ERR(key->krb5_tfms[i])) {
			ret = PTR_ERR(key->krb5_tfms[i]);
			key->krb5_tfms[i] = NULL;
			goto out_flag;
		}
	}

out_flag:
	memalloc_noio_restore(noio_flag);
	return ret;
}

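/*
 * Prepare the transforms needed to actually use @key: nothing for
 * CEPH_CRYPTO_NONE, an AES-CBC skcipher for CEPH_CRYPTO_AES, and an
 * HMAC-SHA256 key plus per-usage AEADs for CEPH_CRYPTO_AES256KRB5.
 */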
int ceph_crypto_key_prepare(struct ceph_crypto_key *key,
			    const u32 *key_usages, int key_usage_cnt)
{
	switch (key->type) {
	case CEPH_CRYPTO_NONE:
		return 0; /* nothing to do */
	case CEPH_CRYPTO_AES:
		return set_aes_tfm(key);
	case CEPH_CRYPTO_AES256KRB5:
		hmac_sha256_preparekey(&key->hmac_key, key->key, key->len);
		return set_krb5_tfms(key, key_usages, key_usage_cnt);
	default:
		return -ENOTSUPP;
	}
}

/*
 * @dst should be zeroed before this function is called.
 */
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
			  const struct ceph_crypto_key *src)
{
	dst->type = src->type;
	dst->created = src->created;
	dst->len = src->len;

	dst->key = kmemdup(src->key, src->len, GFP_NOIO);
	if (!dst->key)
		return -ENOMEM;

	return 0;
}

/*
 * @key should be zeroed before this function is called.
 */
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	ceph_decode_need(p, end, key->len, bad);
	if (key->len > CEPH_MAX_KEY_LEN) {
		pr_err("secret too big %d\n", key->len);
		return -EINVAL;
	}

	key->key = kmemdup(*p, key->len, GFP_NOIO);
	if (!key->key)
		return -ENOMEM;

	memzero_explicit(*p, key->len);
	*p += key->len;
	return 0;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}

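/*
 * Decode a base64-armored secret string (as found in a ceph keyring)
 * into @key.
 */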
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
	int inlen = strlen(inkey);
	int blen = inlen * 3 / 4;
	void *buf, *p;
	int ret;

	dout("crypto_key_unarmor %s\n", inkey);
	buf = kmalloc(blen, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	blen = ceph_unarmor(buf, inkey, inkey+inlen);
	if (blen < 0) {
		kfree(buf);
		return blen;
	}

	p = buf;
	ret = ceph_crypto_key_decode(key, &p, p + blen);
	kfree(buf);
	if (ret)
		return ret;
	dout("crypto_key_unarmor key %p type %d len %d\n", key,
	     key->type, key->len);
	return 0;
}

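/*
 * Release everything hanging off @key: the raw secret (zeroed first)
 * and any transforms set up by ceph_crypto_key_prepare().
 */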
void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
	int i;

	if (!key)
		return;

	kfree_sensitive(key->key);
	key->key = NULL;

	if (key->type == CEPH_CRYPTO_AES) {
		if (key->aes_tfm) {
			crypto_free_sync_skcipher(key->aes_tfm);
			key->aes_tfm = NULL;
		}
	} else if (key->type == CEPH_CRYPTO_AES256KRB5) {
		memzero_explicit(&key->hmac_key, sizeof(key->hmac_key));
		for (i = 0; i < ARRAY_SIZE(key->krb5_tfms); i++) {
			if (key->krb5_tfms[i]) {
				crypto_free_aead(key->krb5_tfms[i]);
				key->krb5_tfms[i] = NULL;
			}
		}
	}
}

static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

/*
 * Should be used for buffers allocated with kvmalloc().
 * Currently these are the encrypt out-buffer (ceph_buffer) and the
 * decrypt in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg avoids memory allocation inside sg_alloc_table() in
 * cases where a single sg is sufficient.  For simplicity, no attempt
 * is made to reduce the number of sgs by coalescing physically
 * contiguous pages.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	if (is_vmalloc) {
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		off = 0;
		buf += len;
		buf_len -= len;
	}
	WARN_ON(buf_len != 0);

	return 0;
}

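/* Counterpart to setup_sgtable() -- a no-op for the single-sg case. */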
static void teardown_sgtable(struct sg_table *sgt)
{
	if (sgt->orig_nents > 1)
		sg_free_table(sgt);
}

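/*
 * En/decrypt @in_len bytes of @buf in place with AES-CBC and the fixed
 * ceph IV.  Encryption appends PKCS#7-style padding, so @buf must have
 * room for up to AES_BLOCK_SIZE extra bytes (see ceph_crypt_buflen());
 * decryption strips and sanity-checks that padding.  The resulting
 * length is returned in *pout_len.
 */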
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
			  void *buf, int buf_len, int in_len, int *pout_len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->aes_tfm);
	struct sg_table sgt;
	struct scatterlist prealloc_sg;
	char iv[AES_BLOCK_SIZE] __aligned(8);
	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
	int crypt_len = encrypt ? in_len + pad_byte : in_len;
	int ret;

	WARN_ON(crypt_len > buf_len);
	if (encrypt)
		memset(buf + in_len, pad_byte, pad_byte);
	ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
	if (ret)
		return ret;

	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
	skcipher_request_set_sync_tfm(req, key->aes_tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

	/*
	print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
		       key->key, key->len, 1);
	print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/
	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
	if (ret) {
		pr_err("%s %scrypt failed: %d\n", __func__,
		       encrypt ? "en" : "de", ret);
		goto out_sgt;
	}
	/*
	print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/

	if (encrypt) {
		*pout_len = crypt_len;
	} else {
		pad_byte = *(char *)(buf + in_len - 1);
		if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
		    in_len >= pad_byte) {
			*pout_len = in_len - pad_byte;
		} else {
			pr_err("%s got bad padding %d on in_len %d\n",
			       __func__, pad_byte, in_len);
			ret = -EPERM;
			goto out_sgt;
		}
	}

out_sgt:
	teardown_sgtable(&sgt);
	return ret;
}

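/*
 * Encrypt @in_len bytes of @buf in place using the AEAD for
 * @usage_slot.  The plaintext is expected at offset AES_BLOCK_SIZE to
 * leave room for the confounder, and the integrity tag is appended, so
 * @buf_len must be sized with ceph_crypt_buflen().  *pout_len is set
 * to the total length of the resulting ciphertext blob.
 */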
static int ceph_krb5_encrypt(const struct ceph_crypto_key *key, int usage_slot,
			     void *buf, int buf_len, int in_len, int *pout_len)
{
	struct sg_table sgt;
	struct scatterlist prealloc_sg;
	int ret;

	if (WARN_ON_ONCE(usage_slot >= ARRAY_SIZE(key->krb5_tfms)))
		return -EINVAL;

	ret = setup_sgtable(&sgt, &prealloc_sg, buf, buf_len);
	if (ret)
		return ret;

	ret = crypto_krb5_encrypt(key->krb5_type, key->krb5_tfms[usage_slot],
				  sgt.sgl, sgt.nents, buf_len, AES_BLOCK_SIZE,
				  in_len, false);
	if (ret < 0) {
		pr_err("%s encrypt failed: %d\n", __func__, ret);
		goto out_sgt;
	}

	*pout_len = ret;
	ret = 0;

out_sgt:
	teardown_sgtable(&sgt);
	return ret;
}

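/*
 * Decrypt and verify @in_len bytes of @buf in place.  On success the
 * plaintext sits past the confounder at offset AES_BLOCK_SIZE (see
 * ceph_crypt_data_offset()) and its length is returned in *pout_len.
 */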
static int ceph_krb5_decrypt(const struct ceph_crypto_key *key, int usage_slot,
			     void *buf, int buf_len, int in_len, int *pout_len)
{
	struct sg_table sgt;
	struct scatterlist prealloc_sg;
	size_t data_off = 0;
	size_t data_len = in_len;
	int ret;

	if (WARN_ON_ONCE(usage_slot >= ARRAY_SIZE(key->krb5_tfms)))
		return -EINVAL;

	ret = setup_sgtable(&sgt, &prealloc_sg, buf, in_len);
	if (ret)
		return ret;

	ret = crypto_krb5_decrypt(key->krb5_type, key->krb5_tfms[usage_slot],
				  sgt.sgl, sgt.nents, &data_off, &data_len);
	if (ret) {
		pr_err("%s decrypt failed: %d\n", __func__, ret);
		goto out_sgt;
	}

	WARN_ON(data_off != AES_BLOCK_SIZE);
	*pout_len = data_len;

out_sgt:
	teardown_sgtable(&sgt);
	return ret;
}

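/*
 * En/decrypt @in_len bytes of @buf in place according to the key type.
 * @usage_slot selects the kerberos key usage for AES256KRB5 keys and is
 * ignored otherwise.  *pout_len is set to the length of the result.
 */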
int ceph_crypt(const struct ceph_crypto_key *key, int usage_slot, bool encrypt,
	       void *buf, int buf_len, int in_len, int *pout_len)
{
	switch (key->type) {
	case CEPH_CRYPTO_NONE:
		*pout_len = in_len;
		return 0;
	case CEPH_CRYPTO_AES:
		return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
				      pout_len);
	case CEPH_CRYPTO_AES256KRB5:
		return encrypt ?
		    ceph_krb5_encrypt(key, usage_slot, buf, buf_len, in_len,
				      pout_len) :
		    ceph_krb5_decrypt(key, usage_slot, buf, buf_len, in_len,
				      pout_len);
	default:
		return -ENOTSUPP;
	}
}

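/*
 * Offset at which the caller should place (or expect) the plaintext
 * within a buffer passed to ceph_crypt().
 */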
int ceph_crypt_data_offset(const struct ceph_crypto_key *key)
{
	switch (key->type) {
	case CEPH_CRYPTO_NONE:
	case CEPH_CRYPTO_AES:
		return 0;
	case CEPH_CRYPTO_AES256KRB5:
		/* confounder */
		return AES_BLOCK_SIZE;
	default:
		BUG();
	}
}

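/*
 * Minimum buffer size needed to encrypt @data_len bytes of plaintext
 * with the given key type, including padding, confounder and integrity
 * tag overhead as applicable.
 */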
int ceph_crypt_buflen(const struct ceph_crypto_key *key, int data_len)
{
	switch (key->type) {
	case CEPH_CRYPTO_NONE:
		return data_len;
	case CEPH_CRYPTO_AES:
		/* PKCS#7 padding at the end */
		return data_len + AES_BLOCK_SIZE -
		       (data_len & (AES_BLOCK_SIZE - 1));
	case CEPH_CRYPTO_AES256KRB5:
		/* confounder at the beginning and 192-bit HMAC at the end */
		return AES_BLOCK_SIZE + data_len + 24;
	default:
		BUG();
	}
}

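/*
 * Compute HMAC-SHA256 of @buf with the key's prepared HMAC key.  Key
 * types that don't carry one (NONE, AES) produce an all-zero digest.
 */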
void ceph_hmac_sha256(const struct ceph_crypto_key *key, const void *buf,
		      int buf_len, u8 hmac[SHA256_DIGEST_SIZE])
{
	switch (key->type) {
	case CEPH_CRYPTO_NONE:
	case CEPH_CRYPTO_AES:
		memset(hmac, 0, SHA256_DIGEST_SIZE);
		return;
	case CEPH_CRYPTO_AES256KRB5:
		hmac_sha256(&key->hmac_key, buf, buf_len, hmac);
		return;
	default:
		BUG();
	}
}

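/* Parse the payload of a "ceph" type kernel key into a ceph_crypto_key. */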
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey;
	size_t datalen = prep->datalen;
	int ret;
	void *p;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 32767 || !prep->data)
		goto err;

	ret = -ENOMEM;
	ckey = kzalloc(sizeof(*ckey), GFP_KERNEL);
	if (!ckey)
		goto err;

	/* TODO ceph_crypto_key_decode should really take const input */
	p = (void *)prep->data;
	ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
	if (ret < 0)
		goto err_ckey;

	prep->payload.data[0] = ckey;
	prep->quotalen = datalen;
	return 0;

err_ckey:
	kfree(ckey);
err:
	return ret;
}

static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey = prep->payload.data[0];
	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

static void ceph_key_destroy(struct key *key)
{
	struct ceph_crypto_key *ckey = key->payload.data[0];

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

struct key_type key_type_ceph = {
	.name		= "ceph",
	.preparse	= ceph_key_preparse,
	.free_preparse	= ceph_key_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= ceph_key_destroy,
};

int __init ceph_crypto_init(void)
{
	return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
	unregister_key_type(&key_type_ceph);
}