12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
203c8efc1SHerbert Xu /*
303c8efc1SHerbert Xu * af_alg: User-space algorithm interface
403c8efc1SHerbert Xu *
503c8efc1SHerbert Xu * This file provides the user-space API for algorithms.
603c8efc1SHerbert Xu *
703c8efc1SHerbert Xu * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
803c8efc1SHerbert Xu */
903c8efc1SHerbert Xu
1060063497SArun Sharma #include <linux/atomic.h>
1103c8efc1SHerbert Xu #include <crypto/if_alg.h>
1203c8efc1SHerbert Xu #include <linux/crypto.h>
1303c8efc1SHerbert Xu #include <linux/init.h>
1403c8efc1SHerbert Xu #include <linux/kernel.h>
157984ceb1SFrederick Lawler #include <linux/key.h>
167984ceb1SFrederick Lawler #include <linux/key-type.h>
1703c8efc1SHerbert Xu #include <linux/list.h>
1803c8efc1SHerbert Xu #include <linux/module.h>
1903c8efc1SHerbert Xu #include <linux/net.h>
2003c8efc1SHerbert Xu #include <linux/rwsem.h>
21c195d66aSHerbert Xu #include <linux/sched.h>
222d97591eSStephan Mueller #include <linux/sched/signal.h>
234c63f83cSMilan Broz #include <linux/security.h>
247984ceb1SFrederick Lawler #include <linux/string.h>
257984ceb1SFrederick Lawler #include <keys/user-type.h>
267984ceb1SFrederick Lawler #include <keys/trusted-type.h>
277984ceb1SFrederick Lawler #include <keys/encrypted-type.h>
2803c8efc1SHerbert Xu
/*
 * One node in the global alg_types list of registered algorithm types
 * (one entry per "algif-*" front end).  The list and its nodes are
 * protected by alg_types_sem.
 */
struct alg_type_list {
	const struct af_alg_type *type;	/* registered type's ops and metadata */
	struct list_head list;		/* linkage into the global alg_types list */
};
3303c8efc1SHerbert Xu
/*
 * Protocol descriptor for AF_ALG sockets.  obj_size makes sk_alloc()
 * reserve room for the whole struct alg_sock, not just struct sock.
 */
static struct proto alg_proto = {
	.name = "ALG",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct alg_sock),
};
3903c8efc1SHerbert Xu
4003c8efc1SHerbert Xu static LIST_HEAD(alg_types);
4103c8efc1SHerbert Xu static DECLARE_RWSEM(alg_types_sem);
4203c8efc1SHerbert Xu
alg_get_type(const char * name)4303c8efc1SHerbert Xu static const struct af_alg_type *alg_get_type(const char *name)
4403c8efc1SHerbert Xu {
4503c8efc1SHerbert Xu const struct af_alg_type *type = ERR_PTR(-ENOENT);
4603c8efc1SHerbert Xu struct alg_type_list *node;
4703c8efc1SHerbert Xu
4803c8efc1SHerbert Xu down_read(&alg_types_sem);
4903c8efc1SHerbert Xu list_for_each_entry(node, &alg_types, list) {
5003c8efc1SHerbert Xu if (strcmp(node->type->name, name))
5103c8efc1SHerbert Xu continue;
5203c8efc1SHerbert Xu
5303c8efc1SHerbert Xu if (try_module_get(node->type->owner))
5403c8efc1SHerbert Xu type = node->type;
5503c8efc1SHerbert Xu break;
5603c8efc1SHerbert Xu }
5703c8efc1SHerbert Xu up_read(&alg_types_sem);
5803c8efc1SHerbert Xu
5903c8efc1SHerbert Xu return type;
6003c8efc1SHerbert Xu }
6103c8efc1SHerbert Xu
af_alg_register_type(const struct af_alg_type * type)6203c8efc1SHerbert Xu int af_alg_register_type(const struct af_alg_type *type)
6303c8efc1SHerbert Xu {
6403c8efc1SHerbert Xu struct alg_type_list *node;
6503c8efc1SHerbert Xu int err = -EEXIST;
6603c8efc1SHerbert Xu
6703c8efc1SHerbert Xu down_write(&alg_types_sem);
6803c8efc1SHerbert Xu list_for_each_entry(node, &alg_types, list) {
6903c8efc1SHerbert Xu if (!strcmp(node->type->name, type->name))
7003c8efc1SHerbert Xu goto unlock;
7103c8efc1SHerbert Xu }
7203c8efc1SHerbert Xu
7303c8efc1SHerbert Xu node = kmalloc(sizeof(*node), GFP_KERNEL);
7403c8efc1SHerbert Xu err = -ENOMEM;
7503c8efc1SHerbert Xu if (!node)
7603c8efc1SHerbert Xu goto unlock;
7703c8efc1SHerbert Xu
7803c8efc1SHerbert Xu type->ops->owner = THIS_MODULE;
7937766586SHerbert Xu if (type->ops_nokey)
8037766586SHerbert Xu type->ops_nokey->owner = THIS_MODULE;
8103c8efc1SHerbert Xu node->type = type;
8203c8efc1SHerbert Xu list_add(&node->list, &alg_types);
8303c8efc1SHerbert Xu err = 0;
8403c8efc1SHerbert Xu
8503c8efc1SHerbert Xu unlock:
8603c8efc1SHerbert Xu up_write(&alg_types_sem);
8703c8efc1SHerbert Xu
8803c8efc1SHerbert Xu return err;
8903c8efc1SHerbert Xu }
9003c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_register_type);
9103c8efc1SHerbert Xu
af_alg_unregister_type(const struct af_alg_type * type)9203c8efc1SHerbert Xu int af_alg_unregister_type(const struct af_alg_type *type)
9303c8efc1SHerbert Xu {
9403c8efc1SHerbert Xu struct alg_type_list *node;
9503c8efc1SHerbert Xu int err = -ENOENT;
9603c8efc1SHerbert Xu
9703c8efc1SHerbert Xu down_write(&alg_types_sem);
9803c8efc1SHerbert Xu list_for_each_entry(node, &alg_types, list) {
9903c8efc1SHerbert Xu if (strcmp(node->type->name, type->name))
10003c8efc1SHerbert Xu continue;
10103c8efc1SHerbert Xu
10203c8efc1SHerbert Xu list_del(&node->list);
10303c8efc1SHerbert Xu kfree(node);
10403c8efc1SHerbert Xu err = 0;
10503c8efc1SHerbert Xu break;
10603c8efc1SHerbert Xu }
10703c8efc1SHerbert Xu up_write(&alg_types_sem);
10803c8efc1SHerbert Xu
10903c8efc1SHerbert Xu return err;
11003c8efc1SHerbert Xu }
11103c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_unregister_type);
11203c8efc1SHerbert Xu
alg_do_release(const struct af_alg_type * type,void * private)11303c8efc1SHerbert Xu static void alg_do_release(const struct af_alg_type *type, void *private)
11403c8efc1SHerbert Xu {
11503c8efc1SHerbert Xu if (!type)
11603c8efc1SHerbert Xu return;
11703c8efc1SHerbert Xu
11803c8efc1SHerbert Xu type->release(private);
11903c8efc1SHerbert Xu module_put(type->owner);
12003c8efc1SHerbert Xu }
12103c8efc1SHerbert Xu
af_alg_release(struct socket * sock)12203c8efc1SHerbert Xu int af_alg_release(struct socket *sock)
12303c8efc1SHerbert Xu {
1249060cb71SMao Wenan if (sock->sk) {
12503c8efc1SHerbert Xu sock_put(sock->sk);
1269060cb71SMao Wenan sock->sk = NULL;
1279060cb71SMao Wenan }
12803c8efc1SHerbert Xu return 0;
12903c8efc1SHerbert Xu }
13003c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_release);
13103c8efc1SHerbert Xu
/*
 * Drop a child socket's reference on its parent AF_ALG socket.
 * @sk is the child; the function walks up to the parent and releases
 * one refcnt (and one nokey_refcnt if the child held one).  The parent
 * is freed when the last reference goes away.
 */
void af_alg_release_parent(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	/* Snapshot the child's nokey state before switching to the parent. */
	unsigned int nokey = atomic_read(&ask->nokey_refcnt);

	sk = ask->parent;
	ask = alg_sk(sk);

	/* A nokey child also pinned the parent's nokey count. */
	if (nokey)
		atomic_dec(&ask->nokey_refcnt);

	/* Last child reference gone: release the hold taken in accept. */
	if (atomic_dec_and_test(&ask->refcnt))
		sock_put(sk);
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);
147c840ac6aSHerbert Xu
/*
 * bind(2) handler: resolve the requested algorithm type and name from
 * the sockaddr_alg, instantiate the algorithm via type->bind(), and
 * attach it to the socket.  Fails with -EBUSY once child sockets exist.
 */
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	/* Only the "kernel driver only" flag may be requested by userspace. */
	const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sockaddr_alg_new *sa = (void *)uaddr;
	const struct af_alg_type *type;
	void *private;
	int err;

	if (sock->state == SS_CONNECTED)
		return -EINVAL;

	/* The new and old sockaddr layouts must agree up to salg_name. */
	BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) !=
		     offsetof(struct sockaddr_alg, salg_name));
	BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa));

	/* Require at least one byte of algorithm name. */
	if (addr_len < sizeof(*sa) + 1)
		return -EINVAL;

	/* If caller uses non-allowed flag, return error. */
	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
		return -EINVAL;

	/* Force NUL termination of both user-supplied strings. */
	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
	sa->salg_name[addr_len - sizeof(*sa) - 1] = 0;

	type = alg_get_type(sa->salg_type);
	if (PTR_ERR(type) == -ENOENT) {
		/* Try to load the front-end module, then look up again. */
		request_module("algif-%s", sa->salg_type);
		type = alg_get_type(sa->salg_type);
	}

	if (IS_ERR(type))
		return PTR_ERR(type);

	private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
	if (IS_ERR(private)) {
		module_put(type->owner);
		return PTR_ERR(private);
	}

	err = -EBUSY;
	lock_sock(sk);
	/* Rebinding is refused while accepted child sockets exist. */
	if (atomic_read(&ask->refcnt))
		goto unlock;

	/* Install the new instance; old one falls out for release below. */
	swap(ask->type, type);
	swap(ask->private, private);

	err = 0;

unlock:
	release_sock(sk);

	/* Releases the previous binding on success, the new one on -EBUSY. */
	alg_do_release(type, private);

	return err;
}
20703c8efc1SHerbert Xu
/*
 * Copy a key from user space into a kernel buffer and hand it to the
 * bound algorithm type's setkey hook.  The temporary buffer is zeroed
 * before being freed so key material does not linger in the heap.
 */
static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen)
{
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type = ask->type;
	int err = -EFAULT;
	u8 *kbuf;

	kbuf = sock_kmalloc(sk, keylen, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (!copy_from_sockptr(kbuf, ukey, keylen))
		err = type->setkey(ask->private, kbuf, keylen);

	sock_kzfree_s(sk, kbuf, keylen);

	return err;
}
23003c8efc1SHerbert Xu
2317984ceb1SFrederick Lawler #ifdef CONFIG_KEYS
2327984ceb1SFrederick Lawler
key_data_ptr_user(const struct key * key,unsigned int * datalen)2337984ceb1SFrederick Lawler static const u8 *key_data_ptr_user(const struct key *key,
2347984ceb1SFrederick Lawler unsigned int *datalen)
2357984ceb1SFrederick Lawler {
2367984ceb1SFrederick Lawler const struct user_key_payload *ukp;
2377984ceb1SFrederick Lawler
2387984ceb1SFrederick Lawler ukp = user_key_payload_locked(key);
2397984ceb1SFrederick Lawler if (IS_ERR_OR_NULL(ukp))
2407984ceb1SFrederick Lawler return ERR_PTR(-EKEYREVOKED);
2417984ceb1SFrederick Lawler
2427984ceb1SFrederick Lawler *datalen = key->datalen;
2437984ceb1SFrederick Lawler
2447984ceb1SFrederick Lawler return ukp->data;
2457984ceb1SFrederick Lawler }
2467984ceb1SFrederick Lawler
key_data_ptr_encrypted(const struct key * key,unsigned int * datalen)2477984ceb1SFrederick Lawler static const u8 *key_data_ptr_encrypted(const struct key *key,
2487984ceb1SFrederick Lawler unsigned int *datalen)
2497984ceb1SFrederick Lawler {
2507984ceb1SFrederick Lawler const struct encrypted_key_payload *ekp;
2517984ceb1SFrederick Lawler
2527984ceb1SFrederick Lawler ekp = dereference_key_locked(key);
2537984ceb1SFrederick Lawler if (IS_ERR_OR_NULL(ekp))
2547984ceb1SFrederick Lawler return ERR_PTR(-EKEYREVOKED);
2557984ceb1SFrederick Lawler
2567984ceb1SFrederick Lawler *datalen = ekp->decrypted_datalen;
2577984ceb1SFrederick Lawler
2587984ceb1SFrederick Lawler return ekp->decrypted_data;
2597984ceb1SFrederick Lawler }
2607984ceb1SFrederick Lawler
key_data_ptr_trusted(const struct key * key,unsigned int * datalen)2617984ceb1SFrederick Lawler static const u8 *key_data_ptr_trusted(const struct key *key,
2627984ceb1SFrederick Lawler unsigned int *datalen)
2637984ceb1SFrederick Lawler {
2647984ceb1SFrederick Lawler const struct trusted_key_payload *tkp;
2657984ceb1SFrederick Lawler
2667984ceb1SFrederick Lawler tkp = dereference_key_locked(key);
2677984ceb1SFrederick Lawler if (IS_ERR_OR_NULL(tkp))
2687984ceb1SFrederick Lawler return ERR_PTR(-EKEYREVOKED);
2697984ceb1SFrederick Lawler
2707984ceb1SFrederick Lawler *datalen = tkp->key_len;
2717984ceb1SFrederick Lawler
2727984ceb1SFrederick Lawler return tkp->key;
2737984ceb1SFrederick Lawler }
2747984ceb1SFrederick Lawler
/*
 * Resolve a key serial number to a struct key, requiring search
 * permission.  Propagates the lookup error on failure.
 */
static struct key *lookup_key(key_serial_t serial)
{
	key_ref_t ref = lookup_user_key(serial, 0, KEY_NEED_SEARCH);

	return IS_ERR(ref) ? ERR_CAST(ref) : key_ref_to_ptr(ref);
}
2857984ceb1SFrederick Lawler
/*
 * Set the algorithm key from a kernel keyring entry identified by the
 * key serial number passed in @optval.  Supports "user"/"logon" keys and,
 * when the respective subsystems are reachable, "encrypted" and "trusted"
 * keys.  The payload is copied out under key->sem, handed to the type's
 * setkey hook, and the temporary copy is zeroized before being freed.
 *
 * Cleanup of the key reference and semaphore is consolidated into a
 * single goto label instead of being repeated on every error path.
 */
static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
				    unsigned int optlen)
{
	const struct af_alg_type *type = ask->type;
	u8 *key_data = NULL;
	unsigned int key_datalen;
	key_serial_t serial;
	struct key *key;
	const u8 *ret;
	int err;

	if (optlen != sizeof(serial))
		return -EINVAL;

	if (copy_from_sockptr(&serial, optval, optlen))
		return -EFAULT;

	key = lookup_key(serial);
	if (IS_ERR(key))
		return PTR_ERR(key);

	down_read(&key->sem);

	/* Dispatch on the key type; unsupported types yield -ENOPROTOOPT. */
	ret = ERR_PTR(-ENOPROTOOPT);
	if (!strcmp(key->type->name, "user") ||
	    !strcmp(key->type->name, "logon")) {
		ret = key_data_ptr_user(key, &key_datalen);
	} else if (IS_REACHABLE(CONFIG_ENCRYPTED_KEYS) &&
			   !strcmp(key->type->name, "encrypted")) {
		ret = key_data_ptr_encrypted(key, &key_datalen);
	} else if (IS_REACHABLE(CONFIG_TRUSTED_KEYS) &&
			   !strcmp(key->type->name, "trusted")) {
		ret = key_data_ptr_trusted(key, &key_datalen);
	}

	if (IS_ERR(ret)) {
		err = PTR_ERR(ret);
		goto out_put_key;
	}

	key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
	if (!key_data) {
		err = -ENOMEM;
		goto out_put_key;
	}

	/* Snapshot the payload while key->sem still protects it. */
	memcpy(key_data, ret, key_datalen);

	up_read(&key->sem);
	key_put(key);

	err = type->setkey(ask->private, key_data, key_datalen);

	sock_kzfree_s(&ask->sk, key_data, key_datalen);

	return err;

out_put_key:
	up_read(&key->sem);
	key_put(key);
	return err;
}
3457984ceb1SFrederick Lawler
3467984ceb1SFrederick Lawler #else
3477984ceb1SFrederick Lawler
/* Without CONFIG_KEYS there is no keyring to look serials up in. */
static inline int alg_setkey_by_key_serial(struct alg_sock *ask,
					   sockptr_t optval,
					   unsigned int optlen)
{
	return -ENOPROTOOPT;
}
3547984ceb1SFrederick Lawler
3557984ceb1SFrederick Lawler #endif
3567984ceb1SFrederick Lawler
/*
 * setsockopt(2) handler for SOL_ALG: set the key (directly or via a key
 * serial), the AEAD authentication tag size, or the DRBG entropy input.
 * Refused with -EBUSY while any accepted child socket holds a keyed
 * reference (refcnt != nokey_refcnt).
 */
static int alg_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type;
	int err = -EBUSY;

	lock_sock(sk);
	/* Only nokey children may exist when the key is (re)configured. */
	if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
		goto unlock;

	type = ask->type;

	err = -ENOPROTOOPT;
	if (level != SOL_ALG || !type)
		goto unlock;

	switch (optname) {
	case ALG_SET_KEY:
	case ALG_SET_KEY_BY_KEY_SERIAL:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setkey)
			goto unlock;

		if (optname == ALG_SET_KEY_BY_KEY_SERIAL)
			err = alg_setkey_by_key_serial(ask, optval, optlen);
		else
			err = alg_setkey(sk, optval, optlen);
		break;
	case ALG_SET_AEAD_AUTHSIZE:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setauthsize)
			goto unlock;
		/* optlen itself carries the authsize; no optval payload. */
		err = type->setauthsize(ask->private, optlen);
		break;
	case ALG_SET_DRBG_ENTROPY:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setentropy)
			goto unlock;

		err = type->setentropy(ask->private, optval, optlen);
	}
	/* Unknown optname falls through with err == -ENOPROTOOPT. */

unlock:
	release_sock(sk);

	return err;
}
40903c8efc1SHerbert Xu
/*
 * accept(2) core: create a child socket bound to the parent's algorithm
 * instance.  The child holds a reference on the parent (dropped via
 * af_alg_release_parent).  If no key has been set and the type provides
 * accept_nokey/ops_nokey, the child is created in restricted nokey mode.
 */
int af_alg_accept(struct sock *sk, struct socket *newsock,
		  struct proto_accept_arg *arg)
{
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type;
	struct sock *sk2;
	unsigned int nokey;
	int err;

	lock_sock(sk);
	type = ask->type;

	/* Parent must have been bound to an algorithm first. */
	err = -EINVAL;
	if (!type)
		goto unlock;

	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, arg->kern);
	err = -ENOMEM;
	if (!sk2)
		goto unlock;

	sock_init_data(newsock, sk2);
	security_sock_graft(sk2, newsock);
	security_sk_clone(sk, sk2);

	/*
	 * newsock->ops assigned here to allow type->accept call to override
	 * them when required.
	 */
	newsock->ops = type->ops;
	err = type->accept(ask->private, sk2);

	/* -ENOKEY: fall back to the restricted nokey path if available. */
	nokey = err == -ENOKEY;
	if (nokey && type->accept_nokey)
		err = type->accept_nokey(ask->private, sk2);

	if (err)
		goto unlock;

	/* First child pins the parent socket. */
	if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
		sock_hold(sk);
	if (nokey) {
		atomic_inc(&ask->nokey_refcnt);
		atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
	}
	alg_sk(sk2)->parent = sk;
	alg_sk(sk2)->type = type;

	newsock->state = SS_CONNECTED;

	/* Nokey children get the restricted op vector. */
	if (nokey)
		newsock->ops = type->ops_nokey;

	err = 0;

unlock:
	release_sock(sk);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_accept);
47103c8efc1SHerbert Xu
alg_accept(struct socket * sock,struct socket * newsock,struct proto_accept_arg * arg)47292ef0fd5SJens Axboe static int alg_accept(struct socket *sock, struct socket *newsock,
47392ef0fd5SJens Axboe struct proto_accept_arg *arg)
47403c8efc1SHerbert Xu {
47592ef0fd5SJens Axboe return af_alg_accept(sock->sk, newsock, arg);
47603c8efc1SHerbert Xu }
47703c8efc1SHerbert Xu
/*
 * proto_ops for the parent (bind/setsockopt/accept) socket.  Data
 * transfer ops are all sock_no_* stubs: actual I/O happens on the
 * accepted child sockets, whose ops come from the algorithm type.
 */
static const struct proto_ops alg_proto_ops = {
	.family = PF_ALG,
	.owner = THIS_MODULE,

	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.getname = sock_no_getname,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.mmap = sock_no_mmap,
	.sendmsg = sock_no_sendmsg,
	.recvmsg = sock_no_recvmsg,

	.bind = alg_bind,
	.release = af_alg_release,
	.setsockopt = alg_setsockopt,
	.accept = alg_accept,
};
49703c8efc1SHerbert Xu
/*
 * sk_destruct hook: release the bound algorithm instance (and its module
 * reference) when the parent socket is finally freed.
 */
static void alg_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);

	alg_do_release(ask->type, ask->private);
}
50403c8efc1SHerbert Xu
alg_create(struct net * net,struct socket * sock,int protocol,int kern)50503c8efc1SHerbert Xu static int alg_create(struct net *net, struct socket *sock, int protocol,
50603c8efc1SHerbert Xu int kern)
50703c8efc1SHerbert Xu {
50803c8efc1SHerbert Xu struct sock *sk;
50903c8efc1SHerbert Xu int err;
51003c8efc1SHerbert Xu
51103c8efc1SHerbert Xu if (sock->type != SOCK_SEQPACKET)
51203c8efc1SHerbert Xu return -ESOCKTNOSUPPORT;
51303c8efc1SHerbert Xu if (protocol != 0)
51403c8efc1SHerbert Xu return -EPROTONOSUPPORT;
51503c8efc1SHerbert Xu
51603c8efc1SHerbert Xu err = -ENOMEM;
51711aa9c28SEric W. Biederman sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
51803c8efc1SHerbert Xu if (!sk)
51903c8efc1SHerbert Xu goto out;
52003c8efc1SHerbert Xu
52103c8efc1SHerbert Xu sock->ops = &alg_proto_ops;
52203c8efc1SHerbert Xu sock_init_data(sock, sk);
52303c8efc1SHerbert Xu
52403c8efc1SHerbert Xu sk->sk_destruct = alg_sock_destruct;
52503c8efc1SHerbert Xu
52603c8efc1SHerbert Xu return 0;
52703c8efc1SHerbert Xu out:
52803c8efc1SHerbert Xu return err;
52903c8efc1SHerbert Xu }
53003c8efc1SHerbert Xu
/* Registration record for the PF_ALG address family. */
static const struct net_proto_family alg_family = {
	.family = PF_ALG,
	.create = alg_create,
	.owner = THIS_MODULE,
};
53603c8efc1SHerbert Xu
/*
 * Chain two scatter-gather lists together: clear the end marker on the
 * last used entry of the previous table, then link its trailing slot to
 * the start of the new table so the combined list can be walked as one.
 */
static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
			   struct af_alg_sgl *sgl_new)
{
	sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
	sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl);
}
54366db3739STadeusz Struk
af_alg_free_sg(struct af_alg_sgl * sgl)54403c8efc1SHerbert Xu void af_alg_free_sg(struct af_alg_sgl *sgl)
54503c8efc1SHerbert Xu {
54603c8efc1SHerbert Xu int i;
54703c8efc1SHerbert Xu
548c662b043SDavid Howells if (sgl->sgt.sgl) {
549f9e7a5faSDavid Howells if (sgl->need_unpin)
550c1abe6f5SDavid Howells for (i = 0; i < sgl->sgt.nents; i++)
551c1abe6f5SDavid Howells unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
552c662b043SDavid Howells if (sgl->sgt.sgl != sgl->sgl)
553c662b043SDavid Howells kvfree(sgl->sgt.sgl);
554c662b043SDavid Howells sgl->sgt.sgl = NULL;
555c662b043SDavid Howells }
55603c8efc1SHerbert Xu }
55703c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_free_sg);
55803c8efc1SHerbert Xu
/*
 * Parse the SOL_ALG control messages of a sendmsg(2) call into @con:
 * the IV (ALG_SET_IV), the operation direction (ALG_SET_OP) and the AEAD
 * associated-data length (ALG_SET_AEAD_ASSOCLEN).  Every cmsg length is
 * validated before its payload is touched; unknown types are rejected.
 */
static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
{
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_ALG)
			continue;

		switch (cmsg->cmsg_type) {
		case ALG_SET_IV:
			/* Header first, then re-check against iv->ivlen. */
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
				return -EINVAL;
			con->iv = (void *)CMSG_DATA(cmsg);
			if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
						      sizeof(*con->iv)))
				return -EINVAL;
			break;

		case ALG_SET_OP:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
				return -EINVAL;
			con->op = *(u32 *)CMSG_DATA(cmsg);
			break;

		case ALG_SET_AEAD_ASSOCLEN:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
				return -EINVAL;
			con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
59803c8efc1SHerbert Xu
/**
 * af_alg_alloc_tsgl - allocate the TX SGL
 *
 * Ensure the tail of ctx->tsgl_list has room for at least one more SG
 * entry, appending a fresh table of MAX_SGL_ENTS entries (plus one
 * chaining slot) when the list is empty or the tail table is full.
 *
 * @sk: socket of connection to user space
 * Return: 0 upon success, < 0 upon error
 */
static int af_alg_alloc_tsgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct scatterlist *sg = NULL;

	/*
	 * On an empty list this list_entry() yields a bogus pointer, but it
	 * is never dereferenced: sg stays NULL and the branch below runs.
	 */
	sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
	if (!list_empty(&ctx->tsgl_list))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk,
				   struct_size(sgl, sg, (MAX_SGL_ENTS + 1)),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		/* Chain the previous full table into the new one. */
		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl_list);
	}

	return 0;
}
6342d97591eSStephan Mueller
/**
 * af_alg_count_tsgl - Count number of TX SG entries
 *
 * The counting starts from the beginning of the SGL to @bytes. If
 * an @offset is provided, the counting of the SG entries starts at the @offset.
 *
 * @sk: socket of connection to user space
 * @bytes: Count the number of SG entries holding given number of bytes.
 * @offset: Start the counting of SG entries from the given offset.
 * Return: Number of TX SG entries found given the constraints
 */
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
{
	const struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_ctx *ctx = ask->private;
	const struct af_alg_tsgl *sgl;
	unsigned int i;
	unsigned int sgl_count = 0;

	if (!bytes)
		return 0;

	list_for_each_entry(sgl, &ctx->tsgl_list, list) {
		const struct scatterlist *sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t bytes_count;

			/* Skip offset */
			/*
			 * NOTE(review): bytes is also decremented while
			 * skipping; presumably callers pass bytes measured
			 * from the list head (covering the skipped region)
			 * so this cannot underflow — confirm at call sites.
			 */
			if (offset >= sg[i].length) {
				offset -= sg[i].length;
				bytes -= sg[i].length;
				continue;
			}

			/* Partial first entry after the offset still counts. */
			bytes_count = sg[i].length - offset;

			offset = 0;
			sgl_count++;

			/* If we have seen requested number of bytes, stop */
			if (bytes_count >= bytes)
				return sgl_count;

			bytes -= bytes_count;
		}
	}

	return sgl_count;
}
EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
6862d97591eSStephan Mueller
6872d97591eSStephan Mueller /**
688b2a4411aSRandy Dunlap * af_alg_pull_tsgl - Release the specified buffers from TX SGL
6892d97591eSStephan Mueller *
690b2a4411aSRandy Dunlap * If @dst is non-null, reassign the pages to @dst. The caller must release
6912d97591eSStephan Mueller * the pages. If @dst_offset is given only reassign the pages to @dst starting
6922d97591eSStephan Mueller * at the @dst_offset (byte). The caller must ensure that @dst is large
6932d97591eSStephan Mueller * enough (e.g. by using af_alg_count_tsgl with the same offset).
6942d97591eSStephan Mueller *
695b2a4411aSRandy Dunlap * @sk: socket of connection to user space
696b2a4411aSRandy Dunlap * @used: Number of bytes to pull from TX SGL
697b2a4411aSRandy Dunlap * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
6982d97591eSStephan Mueller * caller must release the buffers in dst.
699b2a4411aSRandy Dunlap * @dst_offset: Reassign the TX SGL from given offset. All buffers before
7002d97591eSStephan Mueller * reaching the offset is released.
7012d97591eSStephan Mueller */
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
		      size_t dst_offset)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct scatterlist *sg;
	/* j indexes the next free slot in @dst when reassigning pages. */
	unsigned int i, j = 0;

	/* Consume TX SGL blocks front-to-back until @used bytes are pulled. */
	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			/* Bytes to take from this entry (capped by @used). */
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);

			if (!page)
				continue;

			/*
			 * Assumption: caller created af_alg_count_tsgl(len)
			 * SG entries in dst.
			 */
			if (dst) {
				if (dst_offset >= plen) {
					/* discard page before offset */
					dst_offset -= plen;
				} else {
					/* reassign page to dst after offset */
					get_page(page);
					sg_set_page(dst + j, page,
						    plen - dst_offset,
						    sg[i].offset + dst_offset);
					dst_offset = 0;
					j++;
				}
			}

			/* Advance past the consumed bytes of this entry. */
			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/*
			 * Entry only partially consumed: @used is exhausted,
			 * so stop here and keep the remainder queued.
			 */
			if (sg[i].length)
				return;

			/* Entry fully consumed: drop our page reference. */
			put_page(page);
			sg_assign_page(sg + i, NULL);
		}

		/* All entries of this block consumed: release the block. */
		list_del(&sgl->list);
		sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
	/* Socket stays initialized only if more data was announced. */
	ctx->init = ctx->more;
}
EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
7642d97591eSStephan Mueller
7652d97591eSStephan Mueller /**
7662d97591eSStephan Mueller * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
7672d97591eSStephan Mueller *
768b2a4411aSRandy Dunlap * @areq: Request holding the TX and RX SGL
7692d97591eSStephan Mueller */
af_alg_free_areq_sgls(struct af_alg_async_req * areq)770466e0759SEric Biggers static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
7712d97591eSStephan Mueller {
7722d97591eSStephan Mueller struct sock *sk = areq->sk;
7732d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk);
7742d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private;
7752d97591eSStephan Mueller struct af_alg_rsgl *rsgl, *tmp;
7762d97591eSStephan Mueller struct scatterlist *tsgl;
7772d97591eSStephan Mueller struct scatterlist *sg;
7782d97591eSStephan Mueller unsigned int i;
7792d97591eSStephan Mueller
7802d97591eSStephan Mueller list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
781af955bf1SJonathan Cameron atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
7822d97591eSStephan Mueller af_alg_free_sg(&rsgl->sgl);
7832d97591eSStephan Mueller list_del(&rsgl->list);
7842d97591eSStephan Mueller if (rsgl != &areq->first_rsgl)
7852d97591eSStephan Mueller sock_kfree_s(sk, rsgl, sizeof(*rsgl));
7862d97591eSStephan Mueller }
7872d97591eSStephan Mueller
7882d97591eSStephan Mueller tsgl = areq->tsgl;
789887207edSEric Biggers if (tsgl) {
7902d97591eSStephan Mueller for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
7912d97591eSStephan Mueller if (!sg_page(sg))
7922d97591eSStephan Mueller continue;
7932d97591eSStephan Mueller put_page(sg_page(sg));
7942d97591eSStephan Mueller }
7952d97591eSStephan Mueller
7962d97591eSStephan Mueller sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
7972d97591eSStephan Mueller }
798887207edSEric Biggers }
7992d97591eSStephan Mueller
8002d97591eSStephan Mueller /**
8012d97591eSStephan Mueller * af_alg_wait_for_wmem - wait for availability of writable memory
8022d97591eSStephan Mueller *
803b2a4411aSRandy Dunlap * @sk: socket of connection to user space
804b2a4411aSRandy Dunlap * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
805b2a4411aSRandy Dunlap * Return: 0 when writable memory is available, < 0 upon error
8062d97591eSStephan Mueller */
af_alg_wait_for_wmem(struct sock * sk,unsigned int flags)807466e0759SEric Biggers static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
8082d97591eSStephan Mueller {
8092d97591eSStephan Mueller DEFINE_WAIT_FUNC(wait, woken_wake_function);
8102d97591eSStephan Mueller int err = -ERESTARTSYS;
8112d97591eSStephan Mueller long timeout;
8122d97591eSStephan Mueller
8132d97591eSStephan Mueller if (flags & MSG_DONTWAIT)
8142d97591eSStephan Mueller return -EAGAIN;
8152d97591eSStephan Mueller
8162d97591eSStephan Mueller sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
8172d97591eSStephan Mueller
8182d97591eSStephan Mueller add_wait_queue(sk_sleep(sk), &wait);
8192d97591eSStephan Mueller for (;;) {
8202d97591eSStephan Mueller if (signal_pending(current))
8212d97591eSStephan Mueller break;
8222d97591eSStephan Mueller timeout = MAX_SCHEDULE_TIMEOUT;
8232d97591eSStephan Mueller if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
8242d97591eSStephan Mueller err = 0;
8252d97591eSStephan Mueller break;
8262d97591eSStephan Mueller }
8272d97591eSStephan Mueller }
8282d97591eSStephan Mueller remove_wait_queue(sk_sleep(sk), &wait);
8292d97591eSStephan Mueller
8302d97591eSStephan Mueller return err;
8312d97591eSStephan Mueller }
8322d97591eSStephan Mueller
8332d97591eSStephan Mueller /**
8342d97591eSStephan Mueller * af_alg_wmem_wakeup - wakeup caller when writable memory is available
8352d97591eSStephan Mueller *
836b2a4411aSRandy Dunlap * @sk: socket of connection to user space
8372d97591eSStephan Mueller */
void af_alg_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	/* Nothing to report unless write space is actually available. */
	if (!af_alg_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		/*
		 * NOTE(review): despite reporting write space, this wakes
		 * with the EPOLLIN-family flags and SOCK_WAKE_WAITD/POLL_IN
		 * - presumably to rouse recvmsg-side waiters; confirm intent
		 * before changing.
		 */
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
							   EPOLLRDNORM |
							   EPOLLRDBAND);
	sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
8552d97591eSStephan Mueller
8562d97591eSStephan Mueller /**
8572d97591eSStephan Mueller * af_alg_wait_for_data - wait for availability of TX data
8582d97591eSStephan Mueller *
859b2a4411aSRandy Dunlap * @sk: socket of connection to user space
860b2a4411aSRandy Dunlap * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
861b2a4411aSRandy Dunlap * @min: Set to minimum request size if partial requests are allowed.
862b2a4411aSRandy Dunlap * Return: 0 when writable memory is available, < 0 upon error
8632d97591eSStephan Mueller */
af_alg_wait_for_data(struct sock * sk,unsigned flags,unsigned min)864f3c802a1SHerbert Xu int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
8652d97591eSStephan Mueller {
8662d97591eSStephan Mueller DEFINE_WAIT_FUNC(wait, woken_wake_function);
8672d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk);
8682d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private;
8692d97591eSStephan Mueller long timeout;
8702d97591eSStephan Mueller int err = -ERESTARTSYS;
8712d97591eSStephan Mueller
8722d97591eSStephan Mueller if (flags & MSG_DONTWAIT)
8732d97591eSStephan Mueller return -EAGAIN;
8742d97591eSStephan Mueller
8752d97591eSStephan Mueller sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
8762d97591eSStephan Mueller
8772d97591eSStephan Mueller add_wait_queue(sk_sleep(sk), &wait);
8782d97591eSStephan Mueller for (;;) {
8792d97591eSStephan Mueller if (signal_pending(current))
8802d97591eSStephan Mueller break;
8812d97591eSStephan Mueller timeout = MAX_SCHEDULE_TIMEOUT;
882f3c802a1SHerbert Xu if (sk_wait_event(sk, &timeout,
883f3c802a1SHerbert Xu ctx->init && (!ctx->more ||
884f3c802a1SHerbert Xu (min && ctx->used >= min)),
8852d97591eSStephan Mueller &wait)) {
8862d97591eSStephan Mueller err = 0;
8872d97591eSStephan Mueller break;
8882d97591eSStephan Mueller }
8892d97591eSStephan Mueller }
8902d97591eSStephan Mueller remove_wait_queue(sk_sleep(sk), &wait);
8912d97591eSStephan Mueller
8922d97591eSStephan Mueller sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
8932d97591eSStephan Mueller
8942d97591eSStephan Mueller return err;
8952d97591eSStephan Mueller }
8962d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
8972d97591eSStephan Mueller
8982d97591eSStephan Mueller /**
8992d97591eSStephan Mueller * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
9002d97591eSStephan Mueller *
901b2a4411aSRandy Dunlap * @sk: socket of connection to user space
9022d97591eSStephan Mueller */
static void af_alg_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct socket_wq *wq;

	/* Only wake waiters if there actually is queued TX data. */
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
							   EPOLLRDNORM |
							   EPOLLRDBAND);
	sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
9212d97591eSStephan Mueller
9222d97591eSStephan Mueller /**
9232d97591eSStephan Mueller * af_alg_sendmsg - implementation of sendmsg system call handler
9242d97591eSStephan Mueller *
9252d97591eSStephan Mueller * The sendmsg system call handler obtains the user data and stores it
9262d97591eSStephan Mueller * in ctx->tsgl_list. This implies allocation of the required numbers of
9272d97591eSStephan Mueller * struct af_alg_tsgl.
9282d97591eSStephan Mueller *
9292d97591eSStephan Mueller * In addition, the ctx is filled with the information sent via CMSG.
9302d97591eSStephan Mueller *
931b2a4411aSRandy Dunlap * @sock: socket of connection to user space
932b2a4411aSRandy Dunlap * @msg: message from user space
933b2a4411aSRandy Dunlap * @size: size of message from user space
934b2a4411aSRandy Dunlap * @ivsize: the size of the IV for the cipher operation to verify that the
9352d97591eSStephan Mueller * user-space-provided IV has the right size
936b2a4411aSRandy Dunlap * Return: the number of copied data upon success, < 0 upon error
9372d97591eSStephan Mueller */
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
		   unsigned int ivsize)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = false;
	/* init is set when a control message configured this request. */
	bool init = false;
	int err = 0;

	/* Parse CMSG data (operation, IV, AAD length) if present. */
	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = true;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = true;
			break;
		case ALG_OP_DECRYPT:
			enc = false;
			break;
		default:
			return -EINVAL;
		}

		/* User-space IV must match the cipher's IV size exactly. */
		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (ctx->init && !ctx->more) {
		/* A previous request is still pending; reject new data. */
		if (ctx->used) {
			err = -EINVAL;
			goto unlock;
		}

		pr_info_once(
			"%s sent an empty control message without MSG_MORE.\n",
			current->comm);
	}
	ctx->init = true;

	/* Apply the settings carried by the control message, if any. */
	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		struct scatterlist *sg;
		size_t len = size;
		ssize_t plen;

		/* use the existing memory in an allocated page */
		if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) {
			/* Append into the partially filled last SG entry. */
			sgl = list_entry(ctx->tsgl_list.prev,
					 struct af_alg_tsgl, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(size_t, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			/* Still mergeable while the page is not full. */
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		/* Throttle until send buffer space is available. */
		if (!af_alg_writable(sk)) {
			err = af_alg_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, len, af_alg_sndbuf(sk));

		err = af_alg_alloc_tsgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl,
				 list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);

		if (msg->msg_flags & MSG_SPLICE_PAGES) {
			/* Zero-copy path: pin the sender's pages directly. */
			struct sg_table sgtable = {
				.sgl = sg,
				.nents = sgl->cur,
				.orig_nents = sgl->cur,
			};

			plen = extract_iter_to_sg(&msg->msg_iter, len, &sgtable,
						  MAX_SGL_ENTS - sgl->cur, 0);
			if (plen < 0) {
				err = plen;
				goto unlock;
			}

			/* Take a reference on each newly spliced page. */
			for (; sgl->cur < sgtable.nents; sgl->cur++)
				get_page(sg_page(&sg[sgl->cur]));
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			/* Spliced pages must never be appended into. */
			ctx->merge = 0;
		} else {
			/* Copy path: fill freshly allocated pages. */
			do {
				struct page *pg;
				unsigned int i = sgl->cur;

				plen = min_t(size_t, len, PAGE_SIZE);

				pg = alloc_page(GFP_KERNEL);
				if (!pg) {
					err = -ENOMEM;
					goto unlock;
				}

				sg_assign_page(sg + i, pg);

				err = memcpy_from_msg(
					page_address(sg_page(sg + i)),
					msg, plen);
				if (err) {
					__free_page(sg_page(sg + i));
					sg_assign_page(sg + i, NULL);
					goto unlock;
				}

				sg[i].length = plen;
				len -= plen;
				ctx->used += plen;
				copied += plen;
				size -= plen;
				sgl->cur++;
			} while (len && sgl->cur < MAX_SGL_ENTS);

			/* Non-zero if the last page was only partly filled. */
			ctx->merge = plen & (PAGE_SIZE - 1);
		}

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	af_alg_data_wakeup(sk);
	release_sock(sk);

	/* Report partial progress even if a later iteration failed. */
	return copied ?: err;
}
EXPORT_SYMBOL_GPL(af_alg_sendmsg);
11122d97591eSStephan Mueller
11132d97591eSStephan Mueller /**
11147d2c3f54SStephan Mueller * af_alg_free_resources - release resources required for crypto request
1115b2a4411aSRandy Dunlap * @areq: Request holding the TX and RX SGL
11167d2c3f54SStephan Mueller */
af_alg_free_resources(struct af_alg_async_req * areq)11177d2c3f54SStephan Mueller void af_alg_free_resources(struct af_alg_async_req *areq)
11187d2c3f54SStephan Mueller {
11197d2c3f54SStephan Mueller struct sock *sk = areq->sk;
112067b164a8SHerbert Xu struct af_alg_ctx *ctx;
11217d2c3f54SStephan Mueller
11227d2c3f54SStephan Mueller af_alg_free_areq_sgls(areq);
11237d2c3f54SStephan Mueller sock_kfree_s(sk, areq, areq->areqlen);
112467b164a8SHerbert Xu
112567b164a8SHerbert Xu ctx = alg_sk(sk)->private;
112667b164a8SHerbert Xu ctx->inflight = false;
11277d2c3f54SStephan Mueller }
11287d2c3f54SStephan Mueller EXPORT_SYMBOL_GPL(af_alg_free_resources);
11297d2c3f54SStephan Mueller
11307d2c3f54SStephan Mueller /**
11312d97591eSStephan Mueller * af_alg_async_cb - AIO callback handler
1132255e48ebSHerbert Xu * @data: async request completion data
1133b2a4411aSRandy Dunlap * @err: if non-zero, error result to be returned via ki_complete();
1134b2a4411aSRandy Dunlap * otherwise return the AIO output length via ki_complete().
11352d97591eSStephan Mueller *
11362d97591eSStephan Mueller * This handler cleans up the struct af_alg_async_req upon completion of the
11372d97591eSStephan Mueller * AIO operation.
11382d97591eSStephan Mueller *
11392d97591eSStephan Mueller * The number of bytes to be generated with the AIO operation must be set
11402d97591eSStephan Mueller * in areq->outlen before the AIO callback handler is invoked.
11412d97591eSStephan Mueller */
af_alg_async_cb(void * data,int err)1142255e48ebSHerbert Xu void af_alg_async_cb(void *data, int err)
11432d97591eSStephan Mueller {
1144255e48ebSHerbert Xu struct af_alg_async_req *areq = data;
11452d97591eSStephan Mueller struct sock *sk = areq->sk;
11462d97591eSStephan Mueller struct kiocb *iocb = areq->iocb;
11472d97591eSStephan Mueller unsigned int resultlen;
11482d97591eSStephan Mueller
11492d97591eSStephan Mueller /* Buffer size written by crypto operation. */
11502d97591eSStephan Mueller resultlen = areq->outlen;
11512d97591eSStephan Mueller
11527d2c3f54SStephan Mueller af_alg_free_resources(areq);
11537d2c3f54SStephan Mueller sock_put(sk);
11542d97591eSStephan Mueller
11556b19b766SJens Axboe iocb->ki_complete(iocb, err ? err : (int)resultlen);
11562d97591eSStephan Mueller }
11572d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_async_cb);
11582d97591eSStephan Mueller
1159a11e1d43SLinus Torvalds /**
1160a11e1d43SLinus Torvalds * af_alg_poll - poll system call handler
1161b2a4411aSRandy Dunlap * @file: file pointer
1162b2a4411aSRandy Dunlap * @sock: socket to poll
1163b2a4411aSRandy Dunlap * @wait: poll_table
1164a11e1d43SLinus Torvalds */
af_alg_poll(struct file * file,struct socket * sock,poll_table * wait)1165a11e1d43SLinus Torvalds __poll_t af_alg_poll(struct file *file, struct socket *sock,
1166a11e1d43SLinus Torvalds poll_table *wait)
11672d97591eSStephan Mueller {
11682d97591eSStephan Mueller struct sock *sk = sock->sk;
11692d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk);
11702d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private;
1171a11e1d43SLinus Torvalds __poll_t mask;
1172a11e1d43SLinus Torvalds
117389ab066dSKarsten Graul sock_poll_wait(file, sock, wait);
1174a11e1d43SLinus Torvalds mask = 0;
11752d97591eSStephan Mueller
11762d97591eSStephan Mueller if (!ctx->more || ctx->used)
1177a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM;
11782d97591eSStephan Mueller
11792d97591eSStephan Mueller if (af_alg_writable(sk))
1180a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
11812d97591eSStephan Mueller
11822d97591eSStephan Mueller return mask;
11832d97591eSStephan Mueller }
1184a11e1d43SLinus Torvalds EXPORT_SYMBOL_GPL(af_alg_poll);
11852d97591eSStephan Mueller
11862d97591eSStephan Mueller /**
11872d97591eSStephan Mueller * af_alg_alloc_areq - allocate struct af_alg_async_req
11882d97591eSStephan Mueller *
1189b2a4411aSRandy Dunlap * @sk: socket of connection to user space
1190b2a4411aSRandy Dunlap * @areqlen: size of struct af_alg_async_req + crypto_*_reqsize
1191b2a4411aSRandy Dunlap * Return: allocated data structure or ERR_PTR upon error
11922d97591eSStephan Mueller */
af_alg_alloc_areq(struct sock * sk,unsigned int areqlen)11932d97591eSStephan Mueller struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
11942d97591eSStephan Mueller unsigned int areqlen)
11952d97591eSStephan Mueller {
119667b164a8SHerbert Xu struct af_alg_ctx *ctx = alg_sk(sk)->private;
119767b164a8SHerbert Xu struct af_alg_async_req *areq;
11982d97591eSStephan Mueller
119967b164a8SHerbert Xu /* Only one AIO request can be in flight. */
120067b164a8SHerbert Xu if (ctx->inflight)
120167b164a8SHerbert Xu return ERR_PTR(-EBUSY);
120267b164a8SHerbert Xu
120367b164a8SHerbert Xu areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
12042d97591eSStephan Mueller if (unlikely(!areq))
12052d97591eSStephan Mueller return ERR_PTR(-ENOMEM);
12062d97591eSStephan Mueller
120767b164a8SHerbert Xu ctx->inflight = true;
120867b164a8SHerbert Xu
12092d97591eSStephan Mueller areq->areqlen = areqlen;
12102d97591eSStephan Mueller areq->sk = sk;
12116a4b8aa0SDavid Howells areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
12122d97591eSStephan Mueller areq->last_rsgl = NULL;
12132d97591eSStephan Mueller INIT_LIST_HEAD(&areq->rsgl_list);
12142d97591eSStephan Mueller areq->tsgl = NULL;
12152d97591eSStephan Mueller areq->tsgl_entries = 0;
12162d97591eSStephan Mueller
12172d97591eSStephan Mueller return areq;
12182d97591eSStephan Mueller }
12192d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
12202d97591eSStephan Mueller
12212d97591eSStephan Mueller /**
12222d97591eSStephan Mueller * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
12232d97591eSStephan Mueller * operation
12242d97591eSStephan Mueller *
1225b2a4411aSRandy Dunlap * @sk: socket of connection to user space
1226b2a4411aSRandy Dunlap * @msg: user space message
1227b2a4411aSRandy Dunlap * @flags: flags used to invoke recvmsg with
1228b2a4411aSRandy Dunlap * @areq: instance of the cryptographic request that will hold the RX SGL
1229b2a4411aSRandy Dunlap * @maxsize: maximum number of bytes to be pulled from user space
1230b2a4411aSRandy Dunlap * @outlen: number of bytes in the RX SGL
1231b2a4411aSRandy Dunlap * Return: 0 on success, < 0 upon error
12322d97591eSStephan Mueller */
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
		    struct af_alg_async_req *areq, size_t maxsize,
		    size_t *outlen)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	size_t len = 0;

	/* Pull user buffers until maxsize is reached or msg is drained. */
	while (maxsize > len && msg_data_left(msg)) {
		struct af_alg_rsgl *rsgl;
		ssize_t err;
		size_t seglen;

		/* limit the amount of readable buffers */
		if (!af_alg_readable(sk))
			break;

		seglen = min_t(size_t, (maxsize - len),
			       msg_data_left(msg));

		/* First segment uses the embedded RX SGL, rest are kmalloced. */
		if (list_empty(&areq->rsgl_list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl))
				return -ENOMEM;
		}

		/* Record whether extracted pages need unpinning on release. */
		rsgl->sgl.need_unpin =
			iov_iter_extract_will_pin(&msg->msg_iter);
		rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
		rsgl->sgl.sgt.nents = 0;
		rsgl->sgl.sgt.orig_nents = 0;
		list_add_tail(&rsgl->list, &areq->rsgl_list);

		/* Map the user-space pages of this segment into the SGL. */
		sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES);
		err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt,
					 ALG_MAX_PAGES, 0);
		if (err < 0) {
			/* Zero so cleanup does not over-subtract rcvused. */
			rsgl->sg_num_bytes = 0;
			return err;
		}

		sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1);

		/* chain the new scatterlist with previous one */
		if (areq->last_rsgl)
			af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);

		areq->last_rsgl = rsgl;
		len += err;
		atomic_add(err, &ctx->rcvused);
		rsgl->sg_num_bytes = err;
	}

	*outlen = len;
	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_get_rsgl);
12922d97591eSStephan Mueller
af_alg_init(void)129303c8efc1SHerbert Xu static int __init af_alg_init(void)
129403c8efc1SHerbert Xu {
129503c8efc1SHerbert Xu int err = proto_register(&alg_proto, 0);
129603c8efc1SHerbert Xu
129703c8efc1SHerbert Xu if (err)
129803c8efc1SHerbert Xu goto out;
129903c8efc1SHerbert Xu
130003c8efc1SHerbert Xu err = sock_register(&alg_family);
130103c8efc1SHerbert Xu if (err != 0)
130203c8efc1SHerbert Xu goto out_unregister_proto;
130303c8efc1SHerbert Xu
130403c8efc1SHerbert Xu out:
130503c8efc1SHerbert Xu return err;
130603c8efc1SHerbert Xu
130703c8efc1SHerbert Xu out_unregister_proto:
130803c8efc1SHerbert Xu proto_unregister(&alg_proto);
130903c8efc1SHerbert Xu goto out;
131003c8efc1SHerbert Xu }
131103c8efc1SHerbert Xu
static void __exit af_alg_exit(void)
{
	/* Tear down in reverse order of registration in af_alg_init(). */
	sock_unregister(PF_ALG);
	proto_unregister(&alg_proto);
}
131703c8efc1SHerbert Xu
131803c8efc1SHerbert Xu module_init(af_alg_init);
131903c8efc1SHerbert Xu module_exit(af_alg_exit);
1320*7c699fe9SJeff Johnson MODULE_DESCRIPTION("Crypto userspace interface");
132103c8efc1SHerbert Xu MODULE_LICENSE("GPL");
132203c8efc1SHerbert Xu MODULE_ALIAS_NETPROTO(AF_ALG);
1323