// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * The following concept of the memory management is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendmsg. Filling up the TX
 * SGL does not cause a crypto operation -- the data will only be tracked by
 * the kernel. Upon receipt of one recvmsg call, the caller must provide a
 * buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together with
 * the RX SGL release.
 */
25
26 #include <crypto/scatterwalk.h>
27 #include <crypto/skcipher.h>
28 #include <crypto/if_alg.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
31 #include <linux/kernel.h>
32 #include <linux/mm.h>
33 #include <linux/module.h>
34 #include <linux/net.h>
35 #include <net/sock.h>
36
/*
 * Queue user-supplied data into the TX SGL.  All bookkeeping is done by
 * af_alg_sendmsg(); this wrapper only looks up the transform on the parent
 * (bound) socket so the cipher's IV size can be passed along.
 */
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct alg_sock *pask = alg_sk(ask->parent);
	struct crypto_skcipher *tfm = pask->private;

	return af_alg_sendmsg(sock, msg, size, crypto_skcipher_ivsize(tfm));
}
49
algif_skcipher_export(struct sock * sk,struct skcipher_request * req)50 static int algif_skcipher_export(struct sock *sk, struct skcipher_request *req)
51 {
52 struct alg_sock *ask = alg_sk(sk);
53 struct crypto_skcipher *tfm;
54 struct af_alg_ctx *ctx;
55 struct alg_sock *pask;
56 unsigned statesize;
57 struct sock *psk;
58 int err;
59
60 if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
61 return 0;
62
63 ctx = ask->private;
64 psk = ask->parent;
65 pask = alg_sk(psk);
66 tfm = pask->private;
67
68 statesize = crypto_skcipher_statesize(tfm);
69 ctx->state = sock_kmalloc(sk, statesize, GFP_ATOMIC);
70 if (!ctx->state)
71 return -ENOMEM;
72
73 err = crypto_skcipher_export(req, ctx->state);
74 if (err) {
75 sock_kzfree_s(sk, ctx->state, statesize);
76 ctx->state = NULL;
77 }
78
79 return err;
80 }
81
algif_skcipher_done(void * data,int err)82 static void algif_skcipher_done(void *data, int err)
83 {
84 struct af_alg_async_req *areq = data;
85 struct sock *sk = areq->sk;
86
87 if (err)
88 goto out;
89
90 err = algif_skcipher_export(sk, &areq->cra_u.skcipher_req);
91
92 out:
93 af_alg_async_cb(data, err);
94 }
95
/*
 * Perform one cipher operation: pull up to @len bytes from the TX SGL,
 * encrypt or decrypt them into the caller's RX buffers, and return the
 * number of bytes produced (or a negative error).
 *
 * Handles both synchronous and AIO submission; for AIO, returns
 * -EIOCBQUEUED and completion is signalled via algif_skcipher_done().
 */
static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = pask->private;
	unsigned int bs = crypto_skcipher_chunksize(tfm);
	struct af_alg_async_req *areq;
	unsigned cflags = 0;
	int err = 0;
	size_t len = 0;

	/*
	 * Block until the TX SGL holds at least one full chunk (or the
	 * sender signalled completion with less data pending).
	 */
	if (!ctx->init || (ctx->more && ctx->used < bs)) {
		err = af_alg_wait_for_data(sk, flags, bs);
		if (err)
			return err;
	}

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_skcipher_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* convert iovecs of output buffers into RX SGL */
	err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len);
	if (err)
		goto free;

	/*
	 * If more buffers are to be expected to be processed, process only
	 * full block size buffers.
	 */
	if (ctx->more || len < ctx->used) {
		if (len < bs) {
			err = -EINVAL;
			goto free;
		}

		/* Trim to a multiple of the chunk size. */
		len -= len % bs;
		/* Mark the request partial so its state gets exported. */
		cflags |= CRYPTO_SKCIPHER_REQ_NOTFINAL;
	}

	/*
	 * Create a per request TX SGL for this request which tracks the
	 * SG entries from the global TX SGL.
	 */
	areq->tsgl_entries = af_alg_count_tsgl(sk, len);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
						 areq->tsgl_entries),
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	af_alg_pull_tsgl(sk, len, areq->tsgl);

	/* Initialize the crypto operation */
	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
				   areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);

	if (ctx->state) {
		/*
		 * A previous partial operation saved its state; import it
		 * and flag this request as a continuation.  The state
		 * buffer is consumed (freed) regardless of the outcome.
		 */
		err = crypto_skcipher_import(&areq->cra_u.skcipher_req,
					     ctx->state);
		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
		ctx->state = NULL;
		if (err)
			goto free;
		cflags |= CRYPTO_SKCIPHER_REQ_CONT;
	}

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		/* Hold the socket until algif_skcipher_done() runs. */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember output size that will be generated. */
		areq->outlen = len;

		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      cflags |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      algif_skcipher_done, areq);
		err = ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS)
			return -EIOCBQUEUED;

		/* Completed (or failed) synchronously: drop the extra ref. */
		sock_put(sk);
	} else {
		/* Synchronous operation */
		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      cflags |
					      CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
						 &ctx->wait);

		/* Save cipher state if this was a partial operation. */
		if (!err)
			err = algif_skcipher_export(
				sk, &areq->cra_u.skcipher_req);
	}

free:
	af_alg_free_resources(areq);

	return err ? err : len;
}
217
/*
 * recvmsg() entry point: repeatedly invoke _skcipher_recvmsg() until the
 * caller's buffers are filled, an error occurs, or an AIO request is queued.
 */
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int total = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int rc = _skcipher_recvmsg(sock, msg, ignored, flags);

		if (rc <= 0) {
			/*
			 * -EIOCBQUEUED implies that we can only handle one
			 * AIO request per call; a caller wanting several in
			 * parallel must issue separate AIO calls, so that
			 * status is always propagated.  Other errors are
			 * returned only if no data was processed so far.
			 */
			if (rc == -EIOCBQUEUED || !total)
				total = rc;
			break;
		}

		total += rc;
	}

	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return total;
}
250
/* proto_ops used once a key has been set on the parent socket. */
static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	af_alg_poll,
};
269
/*
 * Verify that the parent transform has a key before allowing data traffic
 * on a socket created through the nokey ops.  On success the socket's
 * nokey accounting is cleared so subsequent calls take the fast path.
 *
 * Returns 0 if a key is present (or was never required), -ENOKEY otherwise.
 */
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct crypto_skcipher *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	/* Fast path: this socket already passed the key check earlier. */
	if (!atomic_read(&ask->nokey_refcnt))
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	/* Lock order: child socket first, then parent (nested class). */
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	/* Key is set: drop this socket's nokey reference on the parent. */
	atomic_dec(&pask->nokey_refcnt);
	atomic_set(&ask->nokey_refcnt, 0);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}
304
/* sendmsg for the nokey ops: refuse unless a key has been set. */
static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err = skcipher_check_key(sock);

	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}
316
/* recvmsg for the nokey ops: refuse unless a key has been set. */
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err = skcipher_check_key(sock);

	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}
328
/*
 * proto_ops installed while no key is set; sendmsg/recvmsg go through the
 * _nokey wrappers which enforce the key check first.
 */
static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	af_alg_poll,
};
347
/* af_alg_type::bind — allocate the skcipher transform named by user space. */
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_skcipher(name, type, mask);
}
352
/* af_alg_type::release — free the transform allocated by skcipher_bind(). */
static void skcipher_release(void *private)
{
	crypto_free_skcipher(private);
}
357
/* af_alg_type::setkey — forward the user-supplied key to the transform. */
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_skcipher_setkey(private, key, keylen);
}
362
/*
 * Socket destructor: release all per-socket crypto resources, then drop
 * the reference on the parent (bound) socket.  Order matters: the TX SGL,
 * IV and saved state are freed before the context that tracks them.
 */
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct crypto_skcipher *tfm = pask->private;

	/* Drop any TX data still queued but never processed. */
	af_alg_pull_tsgl(sk, ctx->used, NULL);
	/* Zeroize key-derived material (IV, exported cipher state). */
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	if (ctx->state)
		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
378
skcipher_accept_parent_nokey(void * private,struct sock * sk)379 static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
380 {
381 struct af_alg_ctx *ctx;
382 struct alg_sock *ask = alg_sk(sk);
383 struct crypto_skcipher *tfm = private;
384 unsigned int len = sizeof(*ctx);
385
386 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
387 if (!ctx)
388 return -ENOMEM;
389 memset(ctx, 0, len);
390
391 ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
392 GFP_KERNEL);
393 if (!ctx->iv) {
394 sock_kfree_s(sk, ctx, len);
395 return -ENOMEM;
396 }
397 memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
398
399 INIT_LIST_HEAD(&ctx->tsgl_list);
400 ctx->len = len;
401 crypto_init_wait(&ctx->wait);
402
403 ask->private = ctx;
404
405 sk->sk_destruct = skcipher_sock_destruct;
406
407 return 0;
408 }
409
/*
 * Keyed accept path: refuse to create a data socket while the transform
 * still needs a key, otherwise defer to the common setup.
 */
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct crypto_skcipher *tfm = private;

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}
419
/* Registration record tying the "skcipher" AF_ALG name to its callbacks. */
static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};
431
/* Module init: register the "skcipher" type with the AF_ALG core. */
static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}
436
/* Module exit: unregistration can only fail on a programming error. */
static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}
442
module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_DESCRIPTION("Userspace interface for skcipher algorithms");
MODULE_LICENSE("GPL");
447