xref: /linux/crypto/algif_aead.c (revision 372e2db7210df7c45ead46429aeb1443ba148060)
/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

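/*
 * A minimal sketch of the expected user-space usage, assuming the standard
 * AF_ALG constants from <linux/if_alg.h> and "gcm(aes)" as an example
 * algorithm (error handling omitted; illustrative, not a reference):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * sendmsg() on opfd then carries ALG_SET_OP (ALG_OP_ENCRYPT or
 * ALG_OP_DECRYPT), ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN as control
 * messages plus the data (AAD || plaintext for encryption), and
 * read()/recvmsg() returns the result (ciphertext || tag).
 */
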
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

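/*
 * Buffer for data sent by user space: a fixed-size table of scatterlist
 * entries, each referencing at most one page; ->cur is the number of
 * entries currently in use.
 */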
struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	unsigned int tsgls;
	char iv[];
};

struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

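/*
 * Space still available in the send buffer: sk_sndbuf rounded down to a
 * page boundary, minus the bytes already queued in ctx->used, clamped to
 * zero. For example, with a 16-page sk_sndbuf and three pages queued,
 * 13 pages remain before aead_writable() returns false.
 */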
static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned int as =
		crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD plus, in the case of decryption, the authentication tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = false;
	ctx->merge = false;
}

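/*
 * Drop the page references taken in aead_sendmsg()/aead_sendpage() and
 * reset the context so the socket is ready for the next request.
 */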
static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

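/*
 * Block until the sender has marked the request complete, i.e. a sendmsg()
 * or sendpage() without MSG_MORE has cleared ctx->more. MSG_DONTWAIT turns
 * the wait into an immediate -EAGAIN.
 */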
static int aead_wait_for_data(struct sock *sk, unsigned int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

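/*
 * Queue data for one AEAD operation. The first sendmsg() of a request may
 * carry control messages selecting the operation (encrypt/decrypt), the IV
 * and the AAD length; the payload is copied into kernel pages, appending to
 * a partially filled page where possible (ctx->merge). MSG_MORE announces
 * that further fragments of the same request follow.
 */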
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = false;
	bool init = false;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = true;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = true;
			break;
		case ALG_OP_DECRYPT:
			enc = false;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

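/*
 * Zero-copy variant of aead_sendmsg(): instead of copying, take a reference
 * on the caller's page and link it into the tx scatterlist. Merging into a
 * previously queued page is disabled since no data is copied.
 */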
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = false;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

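/*
 * An asynchronous request is a single allocation laid out as
 *	struct aead_request || tfm-specific request context ||
 *	struct aead_async_req || IV
 * GET_ASYM_REQ() locates the aead_async_req within that allocation and
 * GET_REQ_SIZE() computes its total size; areq->iv[] is the trailing
 * flexible array holding the per-request IV copy.
 */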
#define GET_ASYM_REQ(req, tfm) ((struct aead_async_req *) \
		((char *)(req) + sizeof(struct aead_request) + \
		 crypto_aead_reqsize(tfm)))

#define GET_REQ_SIZE(tfm) (sizeof(struct aead_async_req) + \
	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
	sizeof(struct aead_request))

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct sock *sk = _req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_request *req = aead_request_cast(_req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

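/*
 * Asynchronous read path: the tx scatterlist is handed over from the
 * context to a self-contained request, the caller's iovecs are pinned as
 * rx scatterlists, and the cipher request is submitted. If the request
 * goes asynchronous (-EINPROGRESS), ownership of all resources passes to
 * aead_async_cb() and -EIOCBQUEUED is returned.
 */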
static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -EINVAL;
	unsigned long used;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	if (!aead_sufficient_data(ctx))
		goto unlock;

	used = ctx->used;
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	err = -ENOMEM;
	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, 0, sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, sk);
	used -= ctx->aead_assoclen;

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, sgl->cur);
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto free;
	}

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}

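/*
 * Synchronous read path: the tx scatterlist is used in place and the
 * calling thread sleeps in af_alg_wait_for_completion() until the cipher
 * operation finishes.
 */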
static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int as =
		crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL;
	struct aead_async_rsgl *rsgl, *tmp;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);

	/*
	 * AEAD memory structure: For encryption, the tag is appended to the
	 * ciphertext, which implies that the memory allocated for the
	 * ciphertext must be increased by the tag length. For decryption, the
	 * tag is expected to be concatenated to the ciphertext. The plaintext
	 * therefore has a memory size of the ciphertext minus the tag length.
	 *
	 * The memory layout for a cipher operation is as follows:
	 *	AEAD encryption input:  assoc data || plaintext
	 *	AEAD encryption output: ciphertext || auth tag
	 *	AEAD decryption input:  assoc data || ciphertext || auth tag
	 *	AEAD decryption output: plaintext
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	/* data length provided by caller via sendmsg/sendpage */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- the same check is also
	 * performed in sendmsg/sendpage. The checks there inform the data
	 * sender that something went wrong, but they are not required to
	 * protect kernel integrity. We need the check here as well in case
	 * user space ignores the error returned by sendmsg/sendpage and
	 * still calls recvmsg; this check is what protects the kernel.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* convert iovecs of output buffers into scatterlists */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
		list_del(&rsgl->list);
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}

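/*
 * The socket is readable once a complete request has been queued
 * (ctx->more cleared) and writable while at least one page of send
 * buffer space remains.
 */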
static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

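/*
 * Final teardown: release any queued tx pages and zeroize the IV before
 * freeing it (sock_kzfree_s), as it may hold confidential material.
 */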
static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

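/*
 * Set up the per-socket context on accept(). The context is allocated
 * with enough trailing space for the tfm-specific part of the embedded
 * aead_request, and the IV buffer is sized for the bound algorithm.
 */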
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = false;
	ctx->merge = false;
	ctx->enc = false;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);

	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");