/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
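
/*
 * Illustrative user-space usage (sketch only, not compiled here; the
 * algorithm name "gcm(aes)" and all key/tag sizes are examples):
 *
 *	int tfmfd, opfd;
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)",
 *	};
 *
 *	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	opfd = accept(tfmfd, NULL, 0);
 *
 * The cipher operation, the IV and the AAD length are passed as control
 * messages (ALG_SET_OP, ALG_SET_IV, ALG_SET_AEAD_ASSOCLEN) with sendmsg().
 * The payload iovec carries the associated data followed by the plaintext
 * (encryption) or ciphertext plus tag (decryption); the result is obtained
 * with read()/recvmsg() on the operation socket.
 */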

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

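/* TX data queued by sendmsg()/sendpage(), at most ALG_MAX_PAGES pages. */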
struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

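/*
 * State of one asynchronous (AIO) cipher request: the TX scatterlist taken
 * over from the socket context, the list of RX scatterlists mapping the
 * caller's buffers, the iocb to complete and a per-request copy of the IV
 * in the trailing iv[] array.
 */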
struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	unsigned int tsgls;
	char iv[];
};

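/*
 * Per-socket state: the queued TX data, the RX scatterlists used for
 * synchronous operation, the IV, the amount of queued data, the AAD length
 * and the aead_request used for synchronous processing.
 */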
struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

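/*
 * Remaining send buffer space: max(sk_sndbuf rounded down to a page,
 * PAGE_SIZE) minus the already queued data, never negative.
 */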
static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and, in case of decryption, the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

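/* Drop the page references on all queued TX data and reset the context. */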
static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

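/* Notify waiters once send buffer space is available again. */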
static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

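/*
 * Wait until the sender has indicated that all data for the request has
 * been provided, i.e. no sendmsg()/sendpage() call with MSG_MORE is
 * outstanding.
 */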
static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

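/*
 * Notify waiters once a complete request is queued, i.e. data is present
 * and no further data is expected from the sender.
 */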
static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

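/*
 * Queue plaintext/ciphertext for a subsequent recvmsg(). The control
 * messages select encryption or decryption, set the IV and announce the
 * AAD length; the payload is copied into freshly allocated pages, merging
 * into the last partially filled page where possible.
 */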
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

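/*
 * Zero-copy variant of aead_sendmsg(): take a reference on the given page
 * and link it into the TX scatterlist instead of copying the data.
 */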
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

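/*
 * Memory layout of an asynchronous request allocation:
 *	struct aead_request || tfm request context || struct aead_async_req
 *	(with the IV stored in its trailing iv[] array)
 */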
#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
		((char *)req + sizeof(struct aead_request) + \
		 crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
	sizeof(struct aead_request)

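/*
 * Completion callback for asynchronous requests: release the RX and TX
 * scatterlists, free the request, drop the socket reference and complete
 * the iocb with the cipher result.
 */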
static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct sock *sk = _req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_request *req = aead_request_cast(_req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

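/*
 * Asynchronous (AIO) receive path: the queued TX data is handed over to a
 * self-contained request so that the socket context can be reset and accept
 * new data while the cipher operation is in flight.
 */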
static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -ENOMEM;
	unsigned long used;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	if (!aead_sufficient_data(ctx))
		goto unlock;

	used = ctx->used;
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, sk);
	used -= ctx->aead_assoclen;

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, sgl->cur);
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;

		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}

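/*
 * Synchronous receive path: map the caller's buffers into scatterlists,
 * run the cipher operation over the queued TX data and wait for its
 * completion.
 */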
static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL;
	struct aead_async_rsgl *rsgl, *tmp;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);

	/*
	 * AEAD memory structure: For encryption, the tag is appended to the
	 * ciphertext which implies that the memory allocated for the ciphertext
	 * must be increased by the tag length. For decryption, the tag
	 * is expected to be concatenated to the ciphertext. The plaintext
	 * therefore has a memory size of the ciphertext minus the tag length.
	 *
	 * The memory structure for cipher operation has the following
	 * structure:
	 *	AEAD encryption input:  assoc data || plaintext
	 *	AEAD encryption output: ciphertext || auth tag
	 *	AEAD decryption input:  assoc data || ciphertext || auth tag
	 *	AEAD decryption output: plaintext
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	/* data length provided by caller via sendmsg/sendpage */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendmsg/sendpage
	 * inform the data sender that something is wrong, but they are not
	 * required to maintain kernel integrity. We need this check here too
	 * in case user space decides not to honor the error returned by
	 * sendmsg/sendpage and still calls recvmsg. This check here protects
	 * the kernel integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* convert iovecs of output buffers into scatterlists */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
		list_del(&rsgl->list);
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

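/* Dispatch to the AIO or synchronous receive path based on the iocb. */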
static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

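/*
 * Initialize the per-socket context (state, IV buffer and aead_request)
 * when an operation socket is accept()ed from the transform socket.
 */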
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");