/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
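
/*
 * Illustrative sketch of the user-space side of this interface (an example,
 * not part of this file): AES-128-GCM via "gcm(aes)" with a 16-byte key,
 * a 12-byte IV, a 16-byte tag and 8 bytes of AAD. The variables key, msg,
 * out and ptlen are assumed to be set up by the caller; error handling is
 * omitted for brevity.
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family	= AF_ALG,
 *		.salg_type	= "aead",
 *		.salg_name	= "gcm(aes)",
 *	};
 *	int tfmfd, opfd;
 *
 *	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	opfd = accept(tfmfd, NULL, 0);
 *
 *	sendmsg(opfd, &msg, 0);			send AAD || plaintext, with
 *						ALG_SET_OP, ALG_SET_IV and
 *						ALG_SET_AEAD_ASSOCLEN passed
 *						as ancillary data (see
 *						aead_sendmsg() below)
 *	read(opfd, out, 8 + ptlen + 16);	returns AAD || ciphertext || tag
 */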

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	struct sock *sk;
	unsigned int tsgls;
	char iv[];
};

struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
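
/*
 * Worked example for the check above (numbers assumed for illustration):
 * with "gcm(aes)" (16-byte tag) and aead_assoclen == 8, an encryption
 * request is sufficient once at least 8 bytes (the AAD alone) are queued,
 * while a decryption request needs at least 8 + 16 = 24 bytes, because the
 * tag must be supplied together with the ciphertext.
 */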

static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}
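
/*
 * Sketch of how user space would assemble the ancillary data parsed by
 * af_alg_cmsg_send() above (illustrative only; iv is an assumed 12-byte
 * buffer, so the af_alg_iv payload is 4 + 12 = 16 bytes):
 *
 *	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(16) + CMSG_SPACE(4)] = { 0 };
 *	struct msghdr msg = {
 *		.msg_control	= cbuf,
 *		.msg_controllen	= sizeof(cbuf),
 *	};
 *	struct af_alg_iv *alg_iv;
 *	struct cmsghdr *cmsg;
 *
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type = ALG_SET_OP;
 *	cmsg->cmsg_len = CMSG_LEN(4);
 *	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type = ALG_SET_IV;
 *	cmsg->cmsg_len = CMSG_LEN(16);
 *	alg_iv = (void *)CMSG_DATA(cmsg);
 *	alg_iv->ivlen = 12;
 *	memcpy(alg_iv->iv, iv, 12);
 *
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
 *	cmsg->cmsg_len = CMSG_LEN(4);
 *	*(__u32 *)CMSG_DATA(cmsg) = 8;
 *
 * The payload itself (AAD followed by the plaintext, or by ciphertext and
 * tag for decryption) goes into msg.msg_iov as usual.
 */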

static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
		((char *)req + sizeof(struct aead_request) + \
		 crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
		crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
		sizeof(struct aead_request)
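
/*
 * Memory layout implied by the two macros above for an async request
 * allocated with sock_kmalloc(sk, GET_REQ_SIZE(tfm), ...):
 *
 *	+---------------------------+ <- req
 *	| struct aead_request       |
 *	+---------------------------+
 *	| tfm-private request data  |    crypto_aead_reqsize(tfm) bytes
 *	+---------------------------+ <- GET_ASYM_REQ(req, tfm)
 *	| struct aead_async_req     |
 *	+---------------------------+
 *	| IV copy (areq->iv[])      |    crypto_aead_ivsize(tfm) bytes
 *	+---------------------------+
 */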

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct aead_request *req = _req->data;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct sock *sk = areq->sk;
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -ENOMEM;
	unsigned long used;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	if (!aead_sufficient_data(ctx))
		goto unlock;

	used = ctx->used;
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	areq->sk = sk;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, req);
	used -= ctx->aead_assoclen;

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk,
				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto free;
	}

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}
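
/*
 * The async path above is taken when recvmsg is driven by Linux native AIO,
 * i.e. when msg->msg_iocb is a non-sync kiocb. A hedged user-space sketch
 * (opfd and out as in the earlier example; raw SYS_io_* syscalls are used
 * here only to avoid assuming a libaio dependency):
 *
 *	aio_context_t aio_ctx = 0;
 *	struct iocb cb = {
 *		.aio_fildes	= opfd,
 *		.aio_lio_opcode	= IOCB_CMD_PREAD,
 *		.aio_buf	= (__u64)(unsigned long)out,
 *		.aio_nbytes	= sizeof(out),
 *	};
 *	struct iocb *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	syscall(SYS_io_setup, 1, &aio_ctx);
 *	syscall(SYS_io_submit, aio_ctx, 1, cbs);
 *	syscall(SYS_io_getevents, aio_ctx, 1, 1, &ev, NULL);
 *
 * The -EIOCBQUEUED return travels back through aio; ki_complete() in
 * aead_async_cb() eventually delivers the result as the io_event.
 */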

static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL;
	struct aead_async_rsgl *rsgl, *tmp;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);

	/*
	 * Please see documentation of aead_request_set_crypt for the
	 * description of the AEAD memory structure expected from the caller.
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	/* data length provided by caller via sendmsg/sendpage */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendmsg/sendpage
	 * inform the data sender that something is wrong, but they are
	 * irrelevant to maintaining kernel integrity. We need this check
	 * here too in case user space decides to not honor the error
	 * returned by sendmsg/sendpage and still calls recvmsg. This check
	 * here protects the kernel integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed, resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* convert iovecs of output buffers into scatterlists */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}
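
/*
 * Worked example for the sizing logic in aead_recvmsg_sync() (numbers are
 * assumed for illustration): with "gcm(aes)" (16-byte tag), 8 bytes of AAD
 * and 64 bytes of plaintext:
 *
 *	encryption: ctx->used = 8 + 64 = 72,      outlen = 72 + 16 = 88
 *		    (AAD copy || ciphertext || tag is returned)
 *	decryption: ctx->used = 8 + 64 + 16 = 88, outlen = 88 - 16 = 72
 *		    (AAD copy || plaintext is returned)
 *
 * The cipher operation itself runs over used - aead_assoclen bytes, i.e.
 * 64 for encryption and 80 for decryption, matching the convention of
 * aead_request_set_crypt().
 */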

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);

	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");