/*
 * af_alg: User-space algorithm interface
 *
 * This file provides the user-space API for algorithms.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

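/*
 * Illustrative user-space usage (a sketch only, not part of this file):
 * a transform is selected by bind()ing a struct sockaddr_alg to an AF_ALG
 * socket, and accept() then yields the socket on which the actual crypto
 * operations are performed, e.g. for a message digest:
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "hash",
 *		.salg_name   = "sha256",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 *	write(opfd, data, datalen);
 *	read(opfd, digest, 32);
 */
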
#include <linux/atomic.h>
#include <crypto/if_alg.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/rwsem.h>
#include <linux/sched/signal.h>
#include <linux/security.h>

struct alg_type_list {
	const struct af_alg_type *type;
	struct list_head list;
};

static atomic_long_t alg_memory_allocated;

static struct proto alg_proto = {
	.name			= "ALG",
	.owner			= THIS_MODULE,
	.memory_allocated	= &alg_memory_allocated,
	.obj_size		= sizeof(struct alg_sock),
};

static LIST_HEAD(alg_types);
static DECLARE_RWSEM(alg_types_sem);

static const struct af_alg_type *alg_get_type(const char *name)
{
	const struct af_alg_type *type = ERR_PTR(-ENOENT);
	struct alg_type_list *node;

	down_read(&alg_types_sem);
	list_for_each_entry(node, &alg_types, list) {
		if (strcmp(node->type->name, name))
			continue;

		if (try_module_get(node->type->owner))
			type = node->type;
		break;
	}
	up_read(&alg_types_sem);

	return type;
}

int af_alg_register_type(const struct af_alg_type *type)
{
	struct alg_type_list *node;
	int err = -EEXIST;

	down_write(&alg_types_sem);
	list_for_each_entry(node, &alg_types, list) {
		if (!strcmp(node->type->name, type->name))
			goto unlock;
	}

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	err = -ENOMEM;
	if (!node)
		goto unlock;

	type->ops->owner = THIS_MODULE;
	if (type->ops_nokey)
		type->ops_nokey->owner = THIS_MODULE;
	node->type = type;
	list_add(&node->list, &alg_types);
	err = 0;

unlock:
	up_write(&alg_types_sem);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_register_type);

int af_alg_unregister_type(const struct af_alg_type *type)
{
	struct alg_type_list *node;
	int err = -ENOENT;

	down_write(&alg_types_sem);
	list_for_each_entry(node, &alg_types, list) {
		if (strcmp(node->type->name, type->name))
			continue;

		list_del(&node->list);
		kfree(node);
		err = 0;
		break;
	}
	up_write(&alg_types_sem);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_unregister_type);

static void alg_do_release(const struct af_alg_type *type, void *private)
{
	if (!type)
		return;

	type->release(private);
	module_put(type->owner);
}

int af_alg_release(struct socket *sock)
{
	if (sock->sk)
		sock_put(sock->sk);
	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_release);

void af_alg_release_parent(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	unsigned int nokey = ask->nokey_refcnt;
	bool last = nokey && !ask->refcnt;

	sk = ask->parent;
	ask = alg_sk(sk);

	lock_sock(sk);
	ask->nokey_refcnt -= nokey;
	if (!last)
		last = !--ask->refcnt;
	release_sock(sk);

	if (last)
		sock_put(sk);
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);

static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sockaddr_alg *sa = (void *)uaddr;
	const struct af_alg_type *type;
	void *private;
	int err;

	if (sock->state == SS_CONNECTED)
		return -EINVAL;

	if (addr_len < sizeof(*sa))
		return -EINVAL;

	/* If caller uses non-allowed flag, return error. */
	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
		return -EINVAL;

	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
	sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;

	type = alg_get_type(sa->salg_type);
	if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
		request_module("algif-%s", sa->salg_type);
		type = alg_get_type(sa->salg_type);
	}

	if (IS_ERR(type))
		return PTR_ERR(type);

	private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
	if (IS_ERR(private)) {
		module_put(type->owner);
		return PTR_ERR(private);
	}

	err = -EBUSY;
	lock_sock(sk);
	if (ask->refcnt | ask->nokey_refcnt)
		goto unlock;

	swap(ask->type, type);
	swap(ask->private, private);

	err = 0;

unlock:
	release_sock(sk);

	alg_do_release(type, private);

	return err;
}

static int alg_setkey(struct sock *sk, char __user *ukey,
		      unsigned int keylen)
{
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type = ask->type;
	u8 *key;
	int err;

	key = sock_kmalloc(sk, keylen, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(key, ukey, keylen))
		goto out;

	err = type->setkey(ask->private, key, keylen);

out:
	sock_kzfree_s(sk, key, keylen);

	return err;
}

static int alg_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type;
	int err = -EBUSY;

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock;

	type = ask->type;

	err = -ENOPROTOOPT;
	if (level != SOL_ALG || !type)
		goto unlock;

	switch (optname) {
	case ALG_SET_KEY:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setkey)
			goto unlock;

		err = alg_setkey(sk, optval, optlen);
		break;
	case ALG_SET_AEAD_AUTHSIZE:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setauthsize)
			goto unlock;
		err = type->setauthsize(ask->private, optlen);
	}

unlock:
	release_sock(sk);

	return err;
}

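/*
 * Illustrative use of the socket options handled above (a sketch only):
 * for keyed algorithms the key is installed on the bound tfm socket with
 * setsockopt() before accept() hands out a fully operational operation
 * socket, e.g.:
 *
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen);
 *
 * ALG_SET_AEAD_AUTHSIZE passes the authentication tag length in optlen
 * (the option value itself is not read).
 */
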
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
{
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type;
	struct sock *sk2;
	unsigned int nokey;
	int err;

	lock_sock(sk);
	type = ask->type;

	err = -EINVAL;
	if (!type)
		goto unlock;

	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
	err = -ENOMEM;
	if (!sk2)
		goto unlock;

	sock_init_data(newsock, sk2);
	security_sock_graft(sk2, newsock);
	security_sk_clone(sk, sk2);

	err = type->accept(ask->private, sk2);

	nokey = err == -ENOKEY;
	if (nokey && type->accept_nokey)
		err = type->accept_nokey(ask->private, sk2);

	if (err)
		goto unlock;

	sk2->sk_family = PF_ALG;

	if (nokey || !ask->refcnt++)
		sock_hold(sk);
	ask->nokey_refcnt += nokey;
	alg_sk(sk2)->parent = sk;
	alg_sk(sk2)->type = type;
	alg_sk(sk2)->nokey_refcnt = nokey;

	newsock->ops = type->ops;
	newsock->state = SS_CONNECTED;

	if (nokey)
		newsock->ops = type->ops_nokey;

	err = 0;

unlock:
	release_sock(sk);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_accept);

static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
		      bool kern)
{
	return af_alg_accept(sock->sk, newsock, kern);
}

static const struct proto_ops alg_proto_ops = {
	.family		=	PF_ALG,
	.owner		=	THIS_MODULE,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.sendpage	=	sock_no_sendpage,
	.sendmsg	=	sock_no_sendmsg,
	.recvmsg	=	sock_no_recvmsg,
	.poll		=	sock_no_poll,

	.bind		=	alg_bind,
	.release	=	af_alg_release,
	.setsockopt	=	alg_setsockopt,
	.accept		=	alg_accept,
};

static void alg_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);

	alg_do_release(ask->type, ask->private);
}

static int alg_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	int err;

	if (sock->type != SOCK_SEQPACKET)
		return -ESOCKTNOSUPPORT;
	if (protocol != 0)
		return -EPROTONOSUPPORT;

	err = -ENOMEM;
	sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
	if (!sk)
		goto out;

	sock->ops = &alg_proto_ops;
	sock_init_data(sock, sk);

	sk->sk_family = PF_ALG;
	sk->sk_destruct = alg_sock_destruct;

	return 0;
out:
	return err;
}

static const struct net_proto_family alg_family = {
	.family	=	PF_ALG,
	.create	=	alg_create,
	.owner	=	THIS_MODULE,
};

int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
{
	size_t off;
	ssize_t n;
	int npages, i;

	n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
	if (n < 0)
		return n;

	npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (WARN_ON(npages == 0))
		return -EINVAL;
	/* Add one extra for linking */
	sg_init_table(sgl->sg, npages + 1);

	for (i = 0, len = n; i < npages; i++) {
		int plen = min_t(int, len, PAGE_SIZE - off);

		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);

		off = 0;
		len -= plen;
	}
	sg_mark_end(sgl->sg + npages - 1);
	sgl->npages = npages;

	return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);

void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
{
	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
}
EXPORT_SYMBOL_GPL(af_alg_link_sg);

void af_alg_free_sg(struct af_alg_sgl *sgl)
{
	int i;

	for (i = 0; i < sgl->npages; i++)
		put_page(sgl->pages[i]);
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);

int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
{
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_ALG)
			continue;

		switch (cmsg->cmsg_type) {
		case ALG_SET_IV:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
				return -EINVAL;
			con->iv = (void *)CMSG_DATA(cmsg);
			if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
						      sizeof(*con->iv)))
				return -EINVAL;
			break;

		case ALG_SET_OP:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
				return -EINVAL;
			con->op = *(u32 *)CMSG_DATA(cmsg);
			break;

		case ALG_SET_AEAD_ASSOCLEN:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
				return -EINVAL;
			con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_cmsg_send);

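/*
 * Illustrative control data for sendmsg() as parsed above (a sketch only):
 * on the operation socket, user space selects the operation and supplies
 * the IV as SOL_ALG ancillary data, e.g. for encryption with a 16 byte IV:
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32)) +
 *		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = {};
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type = ALG_SET_OP;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 * A further cmsg of type ALG_SET_IV carries a struct af_alg_iv (ivlen
 * followed by the IV bytes), and ALG_SET_AEAD_ASSOCLEN carries a __u32
 * with the associated data length for AEAD ciphers.
 */
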
/**
 * af_alg_alloc_tsgl - allocate the TX SGL
 *
 * @sk socket of connection to user space
 * @return: 0 upon success, < 0 upon error
 */
int af_alg_alloc_tsgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
	if (!list_empty(&ctx->tsgl_list))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk,
				   struct_size(sgl, sg, (MAX_SGL_ENTS + 1)),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl_list);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);

/**
 * af_alg_count_tsgl - Count number of TX SG entries
 *
 * The counting starts from the beginning of the SGL to @bytes. If
 * an offset is provided, the counting of the SG entries starts at the offset.
 *
 * @sk socket of connection to user space
 * @bytes Count the number of SG entries holding given number of bytes.
 * @offset Start the counting of SG entries from the given offset.
 * @return Number of TX SG entries found given the constraints
 */
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl, *tmp;
	unsigned int i;
	unsigned int sgl_count = 0;

	if (!bytes)
		return 0;

	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
		struct scatterlist *sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t bytes_count;

			/* Skip offset */
			if (offset >= sg[i].length) {
				offset -= sg[i].length;
				bytes -= sg[i].length;
				continue;
			}

			bytes_count = sg[i].length - offset;

			offset = 0;
			sgl_count++;

			/* If we have seen requested number of bytes, stop */
			if (bytes_count >= bytes)
				return sgl_count;

			bytes -= bytes_count;
		}
	}

	return sgl_count;
}
EXPORT_SYMBOL_GPL(af_alg_count_tsgl);

/**
 * af_alg_pull_tsgl - Release the specified buffers from TX SGL
 *
 * If @dst is non-null, reassign the pages to dst. The caller must release
 * the pages. If @dst_offset is given only reassign the pages to @dst starting
 * at the @dst_offset (byte). The caller must ensure that @dst is large
 * enough (e.g. by using af_alg_count_tsgl with the same offset).
 *
 * @sk socket of connection to user space
 * @used Number of bytes to pull from TX SGL
 * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
 *	caller must release the buffers in dst.
 * @dst_offset Reassign the TX SGL from given offset. All buffers before
 *	       reaching the offset are released.
 */
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
		      size_t dst_offset)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct scatterlist *sg;
	unsigned int i, j = 0;

	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);

			if (!page)
				continue;

			/*
			 * Assumption: caller created af_alg_count_tsgl(len)
			 * SG entries in dst.
			 */
			if (dst) {
				if (dst_offset >= plen) {
					/* discard page before offset */
					dst_offset -= plen;
				} else {
					/* reassign page to dst after offset */
					get_page(page);
					sg_set_page(dst + j, page,
						    plen - dst_offset,
						    sg[i].offset + dst_offset);
					dst_offset = 0;
					j++;
				}
			}

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;

			put_page(page);
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
				      (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}
EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);

/**
 * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
 *
 * @areq Request holding the TX and RX SGL
 */
void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_rsgl *rsgl, *tmp;
	struct scatterlist *tsgl;
	struct scatterlist *sg;
	unsigned int i;

	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
		atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	tsgl = areq->tsgl;
	if (tsgl) {
		for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
			if (!sg_page(sg))
				continue;
			put_page(sg_page(sg));
		}

		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
	}
}
EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);

/**
 * af_alg_wait_for_wmem - wait for availability of writable memory
 *
 * @sk socket of connection to user space
 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
 * @return 0 when writable memory is available, < 0 upon error
 */
int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);

/**
 * af_alg_wmem_wakeup - wakeup caller when writable memory is available
 *
 * @sk socket of connection to user space
 */
void af_alg_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!af_alg_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
							   EPOLLRDNORM |
							   EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);

/**
 * af_alg_wait_for_data - wait for availability of TX data
 *
 * @sk socket of connection to user space
 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
 * @return 0 when data is available, < 0 upon error
 */
int af_alg_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
				  &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_wait_for_data);

/**
 * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
 *
 * @sk socket of connection to user space
 */
void af_alg_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
							   EPOLLRDNORM |
							   EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_data_wakeup);

/**
 * af_alg_sendmsg - implementation of sendmsg system call handler
 *
 * The sendmsg system call handler obtains the user data and stores it
 * in ctx->tsgl_list. This implies allocation of the required numbers of
 * struct af_alg_tsgl.
 *
 * In addition, the ctx is filled with the information sent via CMSG.
 *
 * @sock socket of connection to user space
 * @msg message from user space
 * @size size of message from user space
 * @ivsize the size of the IV for the cipher operation to verify that the
 *	   user-space-provided IV has the right size
 * @return number of bytes copied upon success, < 0 upon error
 */
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
		   unsigned int ivsize)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = 0;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used) {
		err = -EINVAL;
		goto unlock;
	}

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		struct scatterlist *sg;
		size_t len = size;
		size_t plen;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl_list.prev,
					 struct af_alg_tsgl, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(size_t, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!af_alg_writable(sk)) {
			err = af_alg_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, len, af_alg_sndbuf(sk));

		err = af_alg_alloc_tsgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl,
				 list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);

		do {
			unsigned int i = sgl->cur;

			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			if (!sg_page(sg + i)) {
				err = -ENOMEM;
				goto unlock;
			}

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	af_alg_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
EXPORT_SYMBOL_GPL(af_alg_sendmsg);

/**
 * af_alg_sendpage - sendpage system call handler
 *
 * This is a generic implementation of sendpage to fill ctx->tsgl_list.
 */
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
			int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!af_alg_writable(sk)) {
		err = af_alg_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = af_alg_alloc_tsgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	af_alg_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
EXPORT_SYMBOL_GPL(af_alg_sendpage);

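/*
 * Note: af_alg_sendmsg() above copies the user data into freshly allocated
 * pages on ctx->tsgl_list, whereas af_alg_sendpage() only takes a reference
 * on the caller's page (get_page()) and links it into the same list, so no
 * data is copied on that path.
 */
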
/**
 * af_alg_free_resources - release resources required for crypto request
 */
void af_alg_free_resources(struct af_alg_async_req *areq)
{
	struct sock *sk = areq->sk;

	af_alg_free_areq_sgls(areq);
	sock_kfree_s(sk, areq, areq->areqlen);
}
EXPORT_SYMBOL_GPL(af_alg_free_resources);

/**
 * af_alg_async_cb - AIO callback handler
 *
 * This handler cleans up the struct af_alg_async_req upon completion of the
 * AIO operation.
 *
 * The number of bytes to be generated with the AIO operation must be set
 * in areq->outlen before the AIO callback handler is invoked.
 */
void af_alg_async_cb(struct crypto_async_request *_req, int err)
{
	struct af_alg_async_req *areq = _req->data;
	struct sock *sk = areq->sk;
	struct kiocb *iocb = areq->iocb;
	unsigned int resultlen;

	/* Buffer size written by crypto operation. */
	resultlen = areq->outlen;

	af_alg_free_resources(areq);
	sock_put(sk);

	iocb->ki_complete(iocb, err ? err : resultlen, 0);
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);

/**
 * af_alg_poll - poll system call handler
 */
__poll_t af_alg_poll(struct file *file, struct socket *sock,
		     poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	__poll_t mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more || ctx->used)
		mask |= EPOLLIN | EPOLLRDNORM;

	if (af_alg_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}
EXPORT_SYMBOL_GPL(af_alg_poll);

/**
 * af_alg_alloc_areq - allocate struct af_alg_async_req
 *
 * @sk socket of connection to user space
 * @areqlen size of struct af_alg_async_req + crypto_*_reqsize
 * @return allocated data structure or ERR_PTR upon error
 */
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
					   unsigned int areqlen)
{
	struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);

	if (unlikely(!areq))
		return ERR_PTR(-ENOMEM);

	areq->areqlen = areqlen;
	areq->sk = sk;
	areq->last_rsgl = NULL;
	INIT_LIST_HEAD(&areq->rsgl_list);
	areq->tsgl = NULL;
	areq->tsgl_entries = 0;

	return areq;
}
EXPORT_SYMBOL_GPL(af_alg_alloc_areq);

/**
 * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
 *		     operation
 *
 * @sk socket of connection to user space
 * @msg user space message
 * @flags flags used to invoke recvmsg with
 * @areq instance of the cryptographic request that will hold the RX SGL
 * @maxsize maximum number of bytes to be pulled from user space
 * @outlen number of bytes in the RX SGL
 * @return 0 on success, < 0 upon error
 */
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
		    struct af_alg_async_req *areq, size_t maxsize,
		    size_t *outlen)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	size_t len = 0;

	while (maxsize > len && msg_data_left(msg)) {
		struct af_alg_rsgl *rsgl;
		size_t seglen;
		int err;

		/* limit the amount of readable buffers */
		if (!af_alg_readable(sk))
			break;

		seglen = min_t(size_t, (maxsize - len),
			       msg_data_left(msg));

		if (list_empty(&areq->rsgl_list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl))
				return -ENOMEM;
		}

		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->rsgl_list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			return err;

		/* chain the new scatterlist with previous one */
		if (areq->last_rsgl)
			af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);

		areq->last_rsgl = rsgl;
		len += err;
		atomic_add(err, &ctx->rcvused);
		rsgl->sg_num_bytes = err;
		iov_iter_advance(&msg->msg_iter, err);
	}

	*outlen = len;
	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_get_rsgl);

static int __init af_alg_init(void)
{
	int err = proto_register(&alg_proto, 0);

	if (err)
		goto out;

	err = sock_register(&alg_family);
	if (err != 0)
		goto out_unregister_proto;

out:
	return err;

out_unregister_proto:
	proto_unregister(&alg_proto);
	goto out;
}

static void __exit af_alg_exit(void)
{
	sock_unregister(PF_ALG);
	proto_unregister(&alg_proto);
}

module_init(af_alg_init);
module_exit(af_alg_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_ALG);