xref: /linux/crypto/algif_aead.c (revision a3a4a816b4b194c45d0217e8b9e08b2639802cda)
/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
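
/*
 * A minimal sketch of the intended user-space usage (illustrative only:
 * error handling is omitted, and "gcm(aes)" with a 16-byte key and 16-byte
 * tag are example choices, not requirements):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)"
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The request socket opfd is then driven by sendmsg() carrying ALG_SET_OP,
 * ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN control messages plus the AAD and
 * plaintext/ciphertext, followed by read()/recvmsg() to obtain the result.
 */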

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

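/*
 * TX data queued by sendmsg()/sendpage(), limited to ALG_MAX_PAGES
 * scatterlist entries.
 */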
struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

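/*
 * State of one asynchronous (AIO) cipher request: the TX scatterlist taken
 * over from the context, the list of RX scatterlists mapping the user-space
 * output buffer, the iocb to complete and the per-request IV copy.
 */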
struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	unsigned int tsgls;
	char iv[];
};

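/* Per request-socket state, allocated in aead_accept_parent(). */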
struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

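/*
 * aead_sndbuf() returns how much send buffer space is still available for
 * new TX data; aead_writable() reports whether at least a full page of it
 * remains.
 */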
static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and, in case of decryption, the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

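/* Reset the TX scatterlist bookkeeping; page references are not dropped. */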
static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

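/* Drop all page references held in the TX scatterlist and reset the context. */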
static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

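/* Wake up senders once send buffer space is available again. */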
static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

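/*
 * Sleep until the sender has marked the data complete (no MSG_MORE
 * outstanding) or a signal is pending.
 */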
static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

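/* Wake up callers waiting for a complete request to become available. */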
static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

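/*
 * Queue TX data for one cipher operation: parse the optional control
 * messages (operation type, IV, AAD length) and copy the payload into
 * freshly allocated pages of the TX scatterlist.
 */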
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = false;
	bool init = false;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = true;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = true;
			break;
		case ALG_OP_DECRYPT:
			enc = false;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

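/*
 * Zero-copy variant of aead_sendmsg(): take a reference on the caller's page
 * and link it into the TX scatterlist directly.
 */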
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

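/*
 * The asynchronous request is allocated as one block laid out as
 * struct aead_request, the tfm-specific request context, struct
 * aead_async_req and finally the IV. GET_ASYM_REQ() returns the
 * aead_async_req portion, GET_REQ_SIZE() the size of the whole block.
 */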
#define GET_ASYM_REQ(req, tfm) ((struct aead_async_req *) \
		((char *)(req) + sizeof(struct aead_request) + \
		 crypto_aead_reqsize(tfm)))

#define GET_REQ_SIZE(tfm) (sizeof(struct aead_async_req) + \
	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
	sizeof(struct aead_request))

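/*
 * Completion callback of an asynchronous request: release the RX and TX
 * scatterlists, free the request and complete the user-space iocb.
 */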
static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct sock *sk = _req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_request *req = aead_request_cast(_req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

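/*
 * AIO path: move the TX scatterlist into a self-contained request, map the
 * user-space output buffer as RX scatterlists and submit the cipher
 * operation; completion is reported through aead_async_cb().
 */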
static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -ENOMEM;
	unsigned long used;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	if (!aead_sufficient_data(ctx))
		goto unlock;

	used = ctx->used;
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, sk);
	used -= ctx->aead_assoclen;

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk,
				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}

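/*
 * Synchronous path: map the user-space output buffer, run the cipher
 * operation on the queued TX data and wait for its completion.
 */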
static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL;
	struct aead_async_rsgl *rsgl, *tmp;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);

	/*
	 * Please see documentation of aead_request_set_crypt for the
	 * description of the AEAD memory structure expected from the caller.
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	/* data length provided by caller via sendmsg/sendpage */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendmsg/sendpage
	 * inform the data sender that something is wrong, but they are
	 * irrelevant for maintaining kernel integrity. We need the check
	 * here too in case user space decides to ignore the error returned
	 * by sendmsg/sendpage and still calls recvmsg. This check here
	 * protects the kernel integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* convert iovecs of output buffers into scatterlists */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

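/* Socket destructor: drop queued TX pages and free the IV and context. */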
static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

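/* Set up the per-socket context for a freshly accepted request socket. */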
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");