1 /*
2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
8 *
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
14 *
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
18 *
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
22 *
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 */
37
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/splice.h>
43 #include <crypto/aead.h>
44
45 #include <net/strparser.h>
46 #include <net/tls.h>
47 #include <trace/events/sock.h>
48
49 #include "tls.h"
50
51 struct tls_decrypt_arg {
52 struct_group(inargs,
53 bool zc;
54 bool async;
55 bool async_done;
56 u8 tail;
57 );
58
59 struct sk_buff *skb;
60 };
61
62 struct tls_decrypt_ctx {
63 struct sock *sk;
64 u8 iv[TLS_MAX_IV_SIZE];
65 u8 aad[TLS_MAX_AAD_SIZE];
66 u8 tail;
67 bool free_sgout;
68 struct scatterlist sg[];
69 };
70
71 noinline void tls_err_abort(struct sock *sk, int err)
72 {
73 WARN_ON_ONCE(err >= 0);
74 /* sk->sk_err should contain a positive error code. */
75 WRITE_ONCE(sk->sk_err, -err);
76 /* Paired with smp_rmb() in tcp_poll() */
77 smp_wmb();
78 sk_error_report(sk);
79 }
80
81 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
82 unsigned int recursion_level)
83 {
84 int start = skb_headlen(skb);
85 int i, chunk = start - offset;
86 struct sk_buff *frag_iter;
87 int elt = 0;
88
89 if (unlikely(recursion_level >= 24))
90 return -EMSGSIZE;
91
92 if (chunk > 0) {
93 if (chunk > len)
94 chunk = len;
95 elt++;
96 len -= chunk;
97 if (len == 0)
98 return elt;
99 offset += chunk;
100 }
101
102 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
103 int end;
104
105 WARN_ON(start > offset + len);
106
107 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
108 chunk = end - offset;
109 if (chunk > 0) {
110 if (chunk > len)
111 chunk = len;
112 elt++;
113 len -= chunk;
114 if (len == 0)
115 return elt;
116 offset += chunk;
117 }
118 start = end;
119 }
120
121 if (unlikely(skb_has_frag_list(skb))) {
122 skb_walk_frags(skb, frag_iter) {
123 int end, ret;
124
125 WARN_ON(start > offset + len);
126
127 end = start + frag_iter->len;
128 chunk = end - offset;
129 if (chunk > 0) {
130 if (chunk > len)
131 chunk = len;
132 ret = __skb_nsg(frag_iter, offset - start, chunk,
133 recursion_level + 1);
134 if (unlikely(ret < 0))
135 return ret;
136 elt += ret;
137 len -= chunk;
138 if (len == 0)
139 return elt;
140 offset += chunk;
141 }
142 start = end;
143 }
144 }
145 BUG_ON(len);
146 return elt;
147 }
148
149 /* Return the number of scatterlist elements required to completely map the
150 * skb, or -EMSGSIZE if the recursion depth is exceeded.
151 */
152 static int skb_nsg(struct sk_buff *skb, int offset, int len)
153 {
154 return __skb_nsg(skb, offset, len, 0);
155 }
156
157 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
158 struct tls_decrypt_arg *darg)
159 {
160 struct strp_msg *rxm = strp_msg(skb);
161 struct tls_msg *tlm = tls_msg(skb);
162 int sub = 0;
163
164 /* Determine zero-padding length */
165 if (prot->version == TLS_1_3_VERSION) {
166 int offset = rxm->full_len - TLS_TAG_SIZE - 1;
167 char content_type = darg->zc ? darg->tail : 0;
168 int err;
169
170 while (content_type == 0) {
171 if (offset < prot->prepend_size)
172 return -EBADMSG;
173 err = skb_copy_bits(skb, rxm->offset + offset,
174 &content_type, 1);
175 if (err)
176 return err;
177 if (content_type)
178 break;
179 sub++;
180 offset--;
181 }
182 tlm->control = content_type;
183 }
184 return sub;
185 }
186
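/* Completion callback for asynchronous decryption, invoked by the crypto
 * layer when an aead_request submitted from tls_do_decryption() finishes.
 * It propagates errors to the socket, releases the destination pages when
 * the record was not decrypted in place, frees the request, and completes
 * the async wait once the last pending decrypt is done.
 */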
187 static void tls_decrypt_done(void *data, int err)
188 {
189 struct aead_request *aead_req = data;
190 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
191 struct scatterlist *sgout = aead_req->dst;
192 struct tls_sw_context_rx *ctx;
193 struct tls_decrypt_ctx *dctx;
194 struct tls_context *tls_ctx;
195 struct scatterlist *sg;
196 unsigned int pages;
197 struct sock *sk;
198 int aead_size;
199
200 /* If requests get too backlogged crypto API returns -EBUSY and calls
201 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
202 * to make waiting for backlog to flush with crypto_wait_req() easier.
203 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
204 * -EINPROGRESS -> 0.
205 * We have a single struct crypto_async_request per direction, this
206 * scheme doesn't help us, so just ignore the first ->complete().
207 */
208 if (err == -EINPROGRESS)
209 return;
210
211 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
212 aead_size = ALIGN(aead_size, __alignof__(*dctx));
213 dctx = (void *)((u8 *)aead_req + aead_size);
214
215 sk = dctx->sk;
216 tls_ctx = tls_get_ctx(sk);
217 ctx = tls_sw_ctx_rx(tls_ctx);
218
219 /* Propagate if there was an err */
220 if (err) {
221 if (err == -EBADMSG)
222 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
223 ctx->async_wait.err = err;
224 tls_err_abort(sk, err);
225 }
226
227 /* Free the destination pages if skb was not decrypted in place */
228 if (dctx->free_sgout) {
229 /* Skip the first S/G entry as it points to AAD */
230 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
231 if (!sg)
232 break;
233 put_page(sg_page(sg));
234 }
235 }
236
237 kfree(aead_req);
238
239 if (atomic_dec_and_test(&ctx->decrypt_pending))
240 complete(&ctx->async_wait.completion);
241 }
242
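/* Wait for all in-flight async decrypt requests to finish. The
 * decrypt_pending counter is kept biased by one, so the dec-and-test only
 * fires when nothing is outstanding; the bias is restored before returning
 * so the next submission sees a consistent count.
 */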
243 static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
244 {
245 if (!atomic_dec_and_test(&ctx->decrypt_pending))
246 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
247 atomic_inc(&ctx->decrypt_pending);
248
249 return ctx->async_wait.err;
250 }
251
252 static int tls_do_decryption(struct sock *sk,
253 struct scatterlist *sgin,
254 struct scatterlist *sgout,
255 char *iv_recv,
256 size_t data_len,
257 struct aead_request *aead_req,
258 struct tls_decrypt_arg *darg)
259 {
260 struct tls_context *tls_ctx = tls_get_ctx(sk);
261 struct tls_prot_info *prot = &tls_ctx->prot_info;
262 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
263 int ret;
264
265 aead_request_set_tfm(aead_req, ctx->aead_recv);
266 aead_request_set_ad(aead_req, prot->aad_size);
267 aead_request_set_crypt(aead_req, sgin, sgout,
268 data_len + prot->tag_size,
269 (u8 *)iv_recv);
270
271 if (darg->async) {
272 aead_request_set_callback(aead_req,
273 CRYPTO_TFM_REQ_MAY_BACKLOG,
274 tls_decrypt_done, aead_req);
275 DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
276 atomic_inc(&ctx->decrypt_pending);
277 } else {
278 DECLARE_CRYPTO_WAIT(wait);
279
280 aead_request_set_callback(aead_req,
281 CRYPTO_TFM_REQ_MAY_BACKLOG,
282 crypto_req_done, &wait);
283 ret = crypto_aead_decrypt(aead_req);
284 if (ret == -EINPROGRESS || ret == -EBUSY)
285 ret = crypto_wait_req(ret, &wait);
286 return ret;
287 }
288
289 ret = crypto_aead_decrypt(aead_req);
290 if (ret == -EINPROGRESS)
291 return 0;
292
293 if (ret == -EBUSY) {
294 ret = tls_decrypt_async_wait(ctx);
295 darg->async_done = true;
296 /* all completions have run, we're not doing async anymore */
297 darg->async = false;
298 return ret;
299 }
300
301 atomic_dec(&ctx->decrypt_pending);
302 darg->async = false;
303
304 return ret;
305 }
306
307 static void tls_trim_both_msgs(struct sock *sk, int target_size)
308 {
309 struct tls_context *tls_ctx = tls_get_ctx(sk);
310 struct tls_prot_info *prot = &tls_ctx->prot_info;
311 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
312 struct tls_rec *rec = ctx->open_rec;
313
314 sk_msg_trim(sk, &rec->msg_plaintext, target_size);
315 if (target_size > 0)
316 target_size += prot->overhead_size;
317 sk_msg_trim(sk, &rec->msg_encrypted, target_size);
318 }
319
320 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
321 {
322 struct tls_context *tls_ctx = tls_get_ctx(sk);
323 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
324 struct tls_rec *rec = ctx->open_rec;
325 struct sk_msg *msg_en = &rec->msg_encrypted;
326
327 return sk_msg_alloc(sk, msg_en, len, 0);
328 }
329
330 static int tls_clone_plaintext_msg(struct sock *sk, int required)
331 {
332 struct tls_context *tls_ctx = tls_get_ctx(sk);
333 struct tls_prot_info *prot = &tls_ctx->prot_info;
334 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
335 struct tls_rec *rec = ctx->open_rec;
336 struct sk_msg *msg_pl = &rec->msg_plaintext;
337 struct sk_msg *msg_en = &rec->msg_encrypted;
338 int skip, len;
339
340 /* We add page references worth len bytes from encrypted sg
341 * at the end of plaintext sg. It is guaranteed that msg_en
342 * has enough required room (ensured by caller).
343 */
344 len = required - msg_pl->sg.size;
345
346 /* Skip initial bytes in msg_en's data to be able to use
347 * same offset of both plain and encrypted data.
348 */
349 skip = prot->prepend_size + msg_pl->sg.size;
350
351 return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
352 }
353
354 static struct tls_rec *tls_get_rec(struct sock *sk)
355 {
356 struct tls_context *tls_ctx = tls_get_ctx(sk);
357 struct tls_prot_info *prot = &tls_ctx->prot_info;
358 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
359 struct sk_msg *msg_pl, *msg_en;
360 struct tls_rec *rec;
361 int mem_size;
362
363 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
364
365 rec = kzalloc(mem_size, sk->sk_allocation);
366 if (!rec)
367 return NULL;
368
369 msg_pl = &rec->msg_plaintext;
370 msg_en = &rec->msg_encrypted;
371
372 sk_msg_init(msg_pl);
373 sk_msg_init(msg_en);
374
375 sg_init_table(rec->sg_aead_in, 2);
376 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
377 sg_unmark_end(&rec->sg_aead_in[1]);
378
379 sg_init_table(rec->sg_aead_out, 2);
380 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
381 sg_unmark_end(&rec->sg_aead_out[1]);
382
383 rec->sk = sk;
384
385 return rec;
386 }
387
388 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
389 {
390 sk_msg_free(sk, &rec->msg_encrypted);
391 sk_msg_free(sk, &rec->msg_plaintext);
392 kfree(rec);
393 }
394
395 static void tls_free_open_rec(struct sock *sk)
396 {
397 struct tls_context *tls_ctx = tls_get_ctx(sk);
398 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
399 struct tls_rec *rec = ctx->open_rec;
400
401 if (rec) {
402 tls_free_rec(sk, rec);
403 ctx->open_rec = NULL;
404 }
405 }
406
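/* Transmit encrypted records to the TCP layer. Any partially sent record
 * is pushed out first, then every record on tx_list that is marked
 * tx_ready is sent and freed. Errors other than -EAGAIN abort the socket.
 */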
407 int tls_tx_records(struct sock *sk, int flags)
408 {
409 struct tls_context *tls_ctx = tls_get_ctx(sk);
410 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
411 struct tls_rec *rec, *tmp;
412 struct sk_msg *msg_en;
413 int tx_flags, rc = 0;
414
415 if (tls_is_partially_sent_record(tls_ctx)) {
416 rec = list_first_entry(&ctx->tx_list,
417 struct tls_rec, list);
418
419 if (flags == -1)
420 tx_flags = rec->tx_flags;
421 else
422 tx_flags = flags;
423
424 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
425 if (rc)
426 goto tx_err;
427
428 /* Full record has been transmitted.
429 * Remove the head of tx_list
430 */
431 list_del(&rec->list);
432 sk_msg_free(sk, &rec->msg_plaintext);
433 kfree(rec);
434 }
435
436 /* Tx all ready records */
437 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
438 if (READ_ONCE(rec->tx_ready)) {
439 if (flags == -1)
440 tx_flags = rec->tx_flags;
441 else
442 tx_flags = flags;
443
444 msg_en = &rec->msg_encrypted;
445 rc = tls_push_sg(sk, tls_ctx,
446 &msg_en->sg.data[msg_en->sg.curr],
447 0, tx_flags);
448 if (rc)
449 goto tx_err;
450
451 list_del(&rec->list);
452 sk_msg_free(sk, &rec->msg_plaintext);
453 kfree(rec);
454 } else {
455 break;
456 }
457 }
458
459 tx_err:
460 if (rc < 0 && rc != -EAGAIN)
461 tls_err_abort(sk, rc);
462
463 return rc;
464 }
465
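/* Completion callback for asynchronous record encryption. It restores the
 * sg entry adjusted in tls_do_encryption(), propagates any error, marks
 * the record as tx_ready and, if the record sits at the head of tx_list,
 * schedules the tx work to transmit it.
 */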
466 static void tls_encrypt_done(void *data, int err)
467 {
468 struct tls_sw_context_tx *ctx;
469 struct tls_context *tls_ctx;
470 struct tls_prot_info *prot;
471 struct tls_rec *rec = data;
472 struct scatterlist *sge;
473 struct sk_msg *msg_en;
474 struct sock *sk;
475
476 if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
477 return;
478
479 msg_en = &rec->msg_encrypted;
480
481 sk = rec->sk;
482 tls_ctx = tls_get_ctx(sk);
483 prot = &tls_ctx->prot_info;
484 ctx = tls_sw_ctx_tx(tls_ctx);
485
486 sge = sk_msg_elem(msg_en, msg_en->sg.curr);
487 sge->offset -= prot->prepend_size;
488 sge->length += prot->prepend_size;
489
490 /* Check if error is previously set on socket */
491 if (err || sk->sk_err) {
492 rec = NULL;
493
494 /* If err is already set on socket, return the same code */
495 if (sk->sk_err) {
496 ctx->async_wait.err = -sk->sk_err;
497 } else {
498 ctx->async_wait.err = err;
499 tls_err_abort(sk, err);
500 }
501 }
502
503 if (rec) {
504 struct tls_rec *first_rec;
505
506 /* Mark the record as ready for transmission */
507 smp_store_mb(rec->tx_ready, true);
508
509 /* If received record is at head of tx_list, schedule tx */
510 first_rec = list_first_entry(&ctx->tx_list,
511 struct tls_rec, list);
512 if (rec == first_rec) {
513 /* Schedule the transmission */
514 if (!test_and_set_bit(BIT_TX_SCHEDULED,
515 &ctx->tx_bitmask))
516 schedule_delayed_work(&ctx->tx_work.work, 1);
517 }
518 }
519
520 if (atomic_dec_and_test(&ctx->encrypt_pending))
521 complete(&ctx->async_wait.completion);
522 }
523
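/* Wait for all in-flight async encrypt requests to finish; same
 * one-biased counter scheme as tls_decrypt_async_wait().
 */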
524 static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
525 {
526 if (!atomic_dec_and_test(&ctx->encrypt_pending))
527 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
528 atomic_inc(&ctx->encrypt_pending);
529
530 return ctx->async_wait.err;
531 }
532
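/* Build the per-record nonce, point the AEAD request at the open record's
 * scatterlists and submit it. On success the record is marked tx_ready;
 * on -EINPROGRESS it stays on tx_list for tls_encrypt_done() to finish.
 * In both cases the record is unhooked from ctx->open_rec and the record
 * sequence number is advanced.
 */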
533 static int tls_do_encryption(struct sock *sk,
534 struct tls_context *tls_ctx,
535 struct tls_sw_context_tx *ctx,
536 struct aead_request *aead_req,
537 size_t data_len, u32 start)
538 {
539 struct tls_prot_info *prot = &tls_ctx->prot_info;
540 struct tls_rec *rec = ctx->open_rec;
541 struct sk_msg *msg_en = &rec->msg_encrypted;
542 struct scatterlist *sge = sk_msg_elem(msg_en, start);
543 int rc, iv_offset = 0;
544
545 /* For CCM based ciphers, first byte of IV is a constant */
546 switch (prot->cipher_type) {
547 case TLS_CIPHER_AES_CCM_128:
548 rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
549 iv_offset = 1;
550 break;
551 case TLS_CIPHER_SM4_CCM:
552 rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
553 iv_offset = 1;
554 break;
555 }
556
557 memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
558 prot->iv_size + prot->salt_size);
559
560 tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
561 tls_ctx->tx.rec_seq);
562
563 sge->offset += prot->prepend_size;
564 sge->length -= prot->prepend_size;
565
566 msg_en->sg.curr = start;
567
568 aead_request_set_tfm(aead_req, ctx->aead_send);
569 aead_request_set_ad(aead_req, prot->aad_size);
570 aead_request_set_crypt(aead_req, rec->sg_aead_in,
571 rec->sg_aead_out,
572 data_len, rec->iv_data);
573
574 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
575 tls_encrypt_done, rec);
576
577 /* Add the record in tx_list */
578 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
579 DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
580 atomic_inc(&ctx->encrypt_pending);
581
582 rc = crypto_aead_encrypt(aead_req);
583 if (rc == -EBUSY) {
584 rc = tls_encrypt_async_wait(ctx);
585 rc = rc ?: -EINPROGRESS;
586 }
587 if (!rc || rc != -EINPROGRESS) {
588 atomic_dec(&ctx->encrypt_pending);
589 sge->offset -= prot->prepend_size;
590 sge->length += prot->prepend_size;
591 }
592
593 if (!rc) {
594 WRITE_ONCE(rec->tx_ready, true);
595 } else if (rc != -EINPROGRESS) {
596 list_del(&rec->list);
597 return rc;
598 }
599
600 /* Unhook the record from the context unless encryption failed */
601 ctx->open_rec = NULL;
602 tls_advance_record_sn(sk, prot, &tls_ctx->tx);
603 return rc;
604 }
605
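/* Split the open record at split_point so that only the bytes covered by
 * apply_bytes are pushed now. A new record is allocated for the remainder
 * and the plaintext sg entries beyond the split are moved to it;
 * *orig_end saves the original end so tls_merge_open_record() can undo
 * the split if needed.
 */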
606 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
607 struct tls_rec **to, struct sk_msg *msg_opl,
608 struct sk_msg *msg_oen, u32 split_point,
609 u32 tx_overhead_size, u32 *orig_end)
610 {
611 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
612 struct scatterlist *sge, *osge, *nsge;
613 u32 orig_size = msg_opl->sg.size;
614 struct scatterlist tmp = { };
615 struct sk_msg *msg_npl;
616 struct tls_rec *new;
617 int ret;
618
619 new = tls_get_rec(sk);
620 if (!new)
621 return -ENOMEM;
622 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
623 tx_overhead_size, 0);
624 if (ret < 0) {
625 tls_free_rec(sk, new);
626 return ret;
627 }
628
629 *orig_end = msg_opl->sg.end;
630 i = msg_opl->sg.start;
631 sge = sk_msg_elem(msg_opl, i);
632 while (apply && sge->length) {
633 if (sge->length > apply) {
634 u32 len = sge->length - apply;
635
636 get_page(sg_page(sge));
637 sg_set_page(&tmp, sg_page(sge), len,
638 sge->offset + apply);
639 sge->length = apply;
640 bytes += apply;
641 apply = 0;
642 } else {
643 apply -= sge->length;
644 bytes += sge->length;
645 }
646
647 sk_msg_iter_var_next(i);
648 if (i == msg_opl->sg.end)
649 break;
650 sge = sk_msg_elem(msg_opl, i);
651 }
652
653 msg_opl->sg.end = i;
654 msg_opl->sg.curr = i;
655 msg_opl->sg.copybreak = 0;
656 msg_opl->apply_bytes = 0;
657 msg_opl->sg.size = bytes;
658
659 msg_npl = &new->msg_plaintext;
660 msg_npl->apply_bytes = apply;
661 msg_npl->sg.size = orig_size - bytes;
662
663 j = msg_npl->sg.start;
664 nsge = sk_msg_elem(msg_npl, j);
665 if (tmp.length) {
666 memcpy(nsge, &tmp, sizeof(*nsge));
667 sk_msg_iter_var_next(j);
668 nsge = sk_msg_elem(msg_npl, j);
669 }
670
671 osge = sk_msg_elem(msg_opl, i);
672 while (osge->length) {
673 memcpy(nsge, osge, sizeof(*nsge));
674 sg_unmark_end(nsge);
675 sk_msg_iter_var_next(i);
676 sk_msg_iter_var_next(j);
677 if (i == *orig_end)
678 break;
679 osge = sk_msg_elem(msg_opl, i);
680 nsge = sk_msg_elem(msg_npl, j);
681 }
682
683 msg_npl->sg.end = j;
684 msg_npl->sg.curr = j;
685 msg_npl->sg.copybreak = 0;
686
687 *to = new;
688 return 0;
689 }
690
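/* Undo tls_split_open_record(): move the plaintext back into 'to',
 * coalescing adjacent sg entries that share a page, take over the
 * encrypted buffer of 'from' and free the temporary record.
 */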
691 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
692 struct tls_rec *from, u32 orig_end)
693 {
694 struct sk_msg *msg_npl = &from->msg_plaintext;
695 struct sk_msg *msg_opl = &to->msg_plaintext;
696 struct scatterlist *osge, *nsge;
697 u32 i, j;
698
699 i = msg_opl->sg.end;
700 sk_msg_iter_var_prev(i);
701 j = msg_npl->sg.start;
702
703 osge = sk_msg_elem(msg_opl, i);
704 nsge = sk_msg_elem(msg_npl, j);
705
706 if (sg_page(osge) == sg_page(nsge) &&
707 osge->offset + osge->length == nsge->offset) {
708 osge->length += nsge->length;
709 put_page(sg_page(nsge));
710 }
711
712 msg_opl->sg.end = orig_end;
713 msg_opl->sg.curr = orig_end;
714 msg_opl->sg.copybreak = 0;
715 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
716 msg_opl->sg.size += msg_npl->sg.size;
717
718 sk_msg_free(sk, &to->msg_encrypted);
719 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
720
721 kfree(from);
722 }
723
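/* Close the currently open record and hand it to the crypto layer:
 * split the record if only part of it should be sent now, append the
 * TLS 1.3 content type, build the AAD and record header, encrypt, and
 * (unless the encryption went async) transmit via tls_tx_records().
 */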
724 static int tls_push_record(struct sock *sk, int flags,
725 unsigned char record_type)
726 {
727 struct tls_context *tls_ctx = tls_get_ctx(sk);
728 struct tls_prot_info *prot = &tls_ctx->prot_info;
729 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
730 struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
731 u32 i, split_point, orig_end;
732 struct sk_msg *msg_pl, *msg_en;
733 struct aead_request *req;
734 bool split;
735 int rc;
736
737 if (!rec)
738 return 0;
739
740 msg_pl = &rec->msg_plaintext;
741 msg_en = &rec->msg_encrypted;
742
743 split_point = msg_pl->apply_bytes;
744 split = split_point && split_point < msg_pl->sg.size;
745 if (unlikely((!split &&
746 msg_pl->sg.size +
747 prot->overhead_size > msg_en->sg.size) ||
748 (split &&
749 split_point +
750 prot->overhead_size > msg_en->sg.size))) {
751 split = true;
752 split_point = msg_en->sg.size;
753 }
754 if (split) {
755 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
756 split_point, prot->overhead_size,
757 &orig_end);
758 if (rc < 0)
759 return rc;
760 /* This can happen if above tls_split_open_record allocates
761 * a single large encryption buffer instead of two smaller
762 * ones. In this case adjust pointers and continue without
763 * split.
764 */
765 if (!msg_pl->sg.size) {
766 tls_merge_open_record(sk, rec, tmp, orig_end);
767 msg_pl = &rec->msg_plaintext;
768 msg_en = &rec->msg_encrypted;
769 split = false;
770 }
771 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
772 prot->overhead_size);
773 }
774
775 rec->tx_flags = flags;
776 req = &rec->aead_req;
777
778 i = msg_pl->sg.end;
779 sk_msg_iter_var_prev(i);
780
781 rec->content_type = record_type;
782 if (prot->version == TLS_1_3_VERSION) {
783 /* Add content type to end of message. No padding added */
784 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
785 sg_mark_end(&rec->sg_content_type);
786 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
787 &rec->sg_content_type);
788 } else {
789 sg_mark_end(sk_msg_elem(msg_pl, i));
790 }
791
792 if (msg_pl->sg.end < msg_pl->sg.start) {
793 sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
794 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
795 msg_pl->sg.data);
796 }
797
798 i = msg_pl->sg.start;
799 sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
800
801 i = msg_en->sg.end;
802 sk_msg_iter_var_prev(i);
803 sg_mark_end(sk_msg_elem(msg_en, i));
804
805 i = msg_en->sg.start;
806 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
807
808 tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
809 tls_ctx->tx.rec_seq, record_type, prot);
810
811 tls_fill_prepend(tls_ctx,
812 page_address(sg_page(&msg_en->sg.data[i])) +
813 msg_en->sg.data[i].offset,
814 msg_pl->sg.size + prot->tail_size,
815 record_type);
816
817 tls_ctx->pending_open_record_frags = false;
818
819 rc = tls_do_encryption(sk, tls_ctx, ctx, req,
820 msg_pl->sg.size + prot->tail_size, i);
821 if (rc < 0) {
822 if (rc != -EINPROGRESS) {
823 tls_err_abort(sk, -EBADMSG);
824 if (split) {
825 tls_ctx->pending_open_record_frags = true;
826 tls_merge_open_record(sk, rec, tmp, orig_end);
827 }
828 }
829 ctx->async_capable = 1;
830 return rc;
831 } else if (split) {
832 msg_pl = &tmp->msg_plaintext;
833 msg_en = &tmp->msg_encrypted;
834 sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
835 tls_ctx->pending_open_record_frags = true;
836 ctx->open_rec = tmp;
837 }
838
839 return tls_tx_records(sk, flags);
840 }
841
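/* Run the open record through the attached sockmap/BPF msg verdict
 * program, if any, and act on the result: __SK_PASS pushes the record on
 * this socket, __SK_REDIRECT hands the plaintext to another socket via
 * tcp_bpf_sendmsg_redir(), __SK_DROP frees it and returns -EACCES.
 * Without a psock or policy the record is simply pushed.
 */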
842 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
843 bool full_record, u8 record_type,
844 ssize_t *copied, int flags)
845 {
846 struct tls_context *tls_ctx = tls_get_ctx(sk);
847 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
848 struct sk_msg msg_redir = { };
849 struct sk_psock *psock;
850 struct sock *sk_redir;
851 struct tls_rec *rec;
852 bool enospc, policy, redir_ingress;
853 int err = 0, send;
854 u32 delta = 0;
855
856 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
857 psock = sk_psock_get(sk);
858 if (!psock || !policy) {
859 err = tls_push_record(sk, flags, record_type);
860 if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
861 *copied -= sk_msg_free(sk, msg);
862 tls_free_open_rec(sk);
863 err = -sk->sk_err;
864 }
865 if (psock)
866 sk_psock_put(sk, psock);
867 return err;
868 }
869 more_data:
870 enospc = sk_msg_full(msg);
871 if (psock->eval == __SK_NONE) {
872 delta = msg->sg.size;
873 psock->eval = sk_psock_msg_verdict(sk, psock, msg);
874 delta -= msg->sg.size;
875 }
876 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
877 !enospc && !full_record) {
878 err = -ENOSPC;
879 goto out_err;
880 }
881 msg->cork_bytes = 0;
882 send = msg->sg.size;
883 if (msg->apply_bytes && msg->apply_bytes < send)
884 send = msg->apply_bytes;
885
886 switch (psock->eval) {
887 case __SK_PASS:
888 err = tls_push_record(sk, flags, record_type);
889 if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
890 *copied -= sk_msg_free(sk, msg);
891 tls_free_open_rec(sk);
892 err = -sk->sk_err;
893 goto out_err;
894 }
895 break;
896 case __SK_REDIRECT:
897 redir_ingress = psock->redir_ingress;
898 sk_redir = psock->sk_redir;
899 memcpy(&msg_redir, msg, sizeof(*msg));
900 if (msg->apply_bytes < send)
901 msg->apply_bytes = 0;
902 else
903 msg->apply_bytes -= send;
904 sk_msg_return_zero(sk, msg, send);
905 msg->sg.size -= send;
906 release_sock(sk);
907 err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
908 &msg_redir, send, flags);
909 lock_sock(sk);
910 if (err < 0) {
911 *copied -= sk_msg_free_nocharge(sk, &msg_redir);
912 msg->sg.size = 0;
913 }
914 if (msg->sg.size == 0)
915 tls_free_open_rec(sk);
916 break;
917 case __SK_DROP:
918 default:
919 sk_msg_free_partial(sk, msg, send);
920 if (msg->apply_bytes < send)
921 msg->apply_bytes = 0;
922 else
923 msg->apply_bytes -= send;
924 if (msg->sg.size == 0)
925 tls_free_open_rec(sk);
926 *copied -= (send + delta);
927 err = -EACCES;
928 }
929
930 if (likely(!err)) {
931 bool reset_eval = !ctx->open_rec;
932
933 rec = ctx->open_rec;
934 if (rec) {
935 msg = &rec->msg_plaintext;
936 if (!msg->apply_bytes)
937 reset_eval = true;
938 }
939 if (reset_eval) {
940 psock->eval = __SK_NONE;
941 if (psock->sk_redir) {
942 sock_put(psock->sk_redir);
943 psock->sk_redir = NULL;
944 }
945 }
946 if (rec)
947 goto more_data;
948 }
949 out_err:
950 sk_psock_put(sk, psock);
951 return err;
952 }
953
954 static int tls_sw_push_pending_record(struct sock *sk, int flags)
955 {
956 struct tls_context *tls_ctx = tls_get_ctx(sk);
957 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
958 struct tls_rec *rec = ctx->open_rec;
959 struct sk_msg *msg_pl;
960 size_t copied;
961
962 if (!rec)
963 return 0;
964
965 msg_pl = &rec->msg_plaintext;
966 copied = msg_pl->sg.size;
967 if (!copied)
968 return 0;
969
970 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
971 &copied, flags);
972 }
973
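/* MSG_SPLICE_PAGES path: instead of copying, extract page references
 * from the iterator and attach them to the plaintext message.
 */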
974 static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
975 struct sk_msg *msg_pl, size_t try_to_copy,
976 ssize_t *copied)
977 {
978 struct page *page = NULL, **pages = &page;
979
980 do {
981 ssize_t part;
982 size_t off;
983
984 part = iov_iter_extract_pages(&msg->msg_iter, &pages,
985 try_to_copy, 1, 0, &off);
986 if (part <= 0)
987 return part ?: -EIO;
988
989 if (WARN_ON_ONCE(!sendpage_ok(page))) {
990 iov_iter_revert(&msg->msg_iter, part);
991 return -EIO;
992 }
993
994 sk_msg_page_add(msg_pl, page, part, off);
995 msg_pl->sg.copybreak = 0;
996 msg_pl->sg.curr = msg_pl->sg.end;
997 sk_mem_charge(sk, part);
998 *copied += part;
999 try_to_copy -= part;
1000 } while (try_to_copy && !sk_msg_full(msg_pl));
1001
1002 return 0;
1003 }
1004
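/* Core sendmsg loop, called with the socket lock held: fill the open
 * record with user data (zero-copy from the iterator when possible),
 * push full records through bpf_exec_tx_verdict(), and wait for memory
 * or async crypto completions as needed.
 */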
1005 static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
1006 size_t size)
1007 {
1008 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1009 struct tls_context *tls_ctx = tls_get_ctx(sk);
1010 struct tls_prot_info *prot = &tls_ctx->prot_info;
1011 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1012 bool async_capable = ctx->async_capable;
1013 unsigned char record_type = TLS_RECORD_TYPE_DATA;
1014 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1015 bool eor = !(msg->msg_flags & MSG_MORE);
1016 size_t try_to_copy;
1017 ssize_t copied = 0;
1018 struct sk_msg *msg_pl, *msg_en;
1019 struct tls_rec *rec;
1020 int required_size;
1021 int num_async = 0;
1022 bool full_record;
1023 int record_room;
1024 int num_zc = 0;
1025 int orig_size;
1026 int ret = 0;
1027
1028 if (!eor && (msg->msg_flags & MSG_EOR))
1029 return -EINVAL;
1030
1031 if (unlikely(msg->msg_controllen)) {
1032 ret = tls_process_cmsg(sk, msg, &record_type);
1033 if (ret) {
1034 if (ret == -EINPROGRESS)
1035 num_async++;
1036 else if (ret != -EAGAIN)
1037 goto send_end;
1038 }
1039 }
1040
1041 while (msg_data_left(msg)) {
1042 if (sk->sk_err) {
1043 ret = -sk->sk_err;
1044 goto send_end;
1045 }
1046
1047 if (ctx->open_rec)
1048 rec = ctx->open_rec;
1049 else
1050 rec = ctx->open_rec = tls_get_rec(sk);
1051 if (!rec) {
1052 ret = -ENOMEM;
1053 goto send_end;
1054 }
1055
1056 msg_pl = &rec->msg_plaintext;
1057 msg_en = &rec->msg_encrypted;
1058
1059 orig_size = msg_pl->sg.size;
1060 full_record = false;
1061 try_to_copy = msg_data_left(msg);
1062 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1063 if (try_to_copy >= record_room) {
1064 try_to_copy = record_room;
1065 full_record = true;
1066 }
1067
1068 required_size = msg_pl->sg.size + try_to_copy +
1069 prot->overhead_size;
1070
1071 if (!sk_stream_memory_free(sk))
1072 goto wait_for_sndbuf;
1073
1074 alloc_encrypted:
1075 ret = tls_alloc_encrypted_msg(sk, required_size);
1076 if (ret) {
1077 if (ret != -ENOSPC)
1078 goto wait_for_memory;
1079
1080 /* Adjust try_to_copy according to the amount that was
1081 * actually allocated. The difference is due
1082 * to max sg elements limit
1083 */
1084 try_to_copy -= required_size - msg_en->sg.size;
1085 full_record = true;
1086 }
1087
1088 if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
1089 ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
1090 try_to_copy, &copied);
1091 if (ret < 0)
1092 goto send_end;
1093 tls_ctx->pending_open_record_frags = true;
1094
1095 if (sk_msg_full(msg_pl))
1096 full_record = true;
1097
1098 if (full_record || eor)
1099 goto copied;
1100 continue;
1101 }
1102
1103 if (!is_kvec && (full_record || eor) && !async_capable) {
1104 u32 first = msg_pl->sg.end;
1105
1106 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1107 msg_pl, try_to_copy);
1108 if (ret)
1109 goto fallback_to_reg_send;
1110
1111 num_zc++;
1112 copied += try_to_copy;
1113
1114 sk_msg_sg_copy_set(msg_pl, first);
1115 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1116 record_type, &copied,
1117 msg->msg_flags);
1118 if (ret) {
1119 if (ret == -EINPROGRESS)
1120 num_async++;
1121 else if (ret == -ENOMEM)
1122 goto wait_for_memory;
1123 else if (ctx->open_rec && ret == -ENOSPC)
1124 goto rollback_iter;
1125 else if (ret != -EAGAIN)
1126 goto send_end;
1127 }
1128 continue;
1129 rollback_iter:
1130 copied -= try_to_copy;
1131 sk_msg_sg_copy_clear(msg_pl, first);
1132 iov_iter_revert(&msg->msg_iter,
1133 msg_pl->sg.size - orig_size);
1134 fallback_to_reg_send:
1135 sk_msg_trim(sk, msg_pl, orig_size);
1136 }
1137
1138 required_size = msg_pl->sg.size + try_to_copy;
1139
1140 ret = tls_clone_plaintext_msg(sk, required_size);
1141 if (ret) {
1142 if (ret != -ENOSPC)
1143 goto send_end;
1144
1145 /* Adjust try_to_copy according to the amount that was
1146 * actually allocated. The difference is due
1147 * to max sg elements limit
1148 */
1149 try_to_copy -= required_size - msg_pl->sg.size;
1150 full_record = true;
1151 sk_msg_trim(sk, msg_en,
1152 msg_pl->sg.size + prot->overhead_size);
1153 }
1154
1155 if (try_to_copy) {
1156 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1157 msg_pl, try_to_copy);
1158 if (ret < 0)
1159 goto trim_sgl;
1160 }
1161
1162 /* Open records defined only if successfully copied, otherwise
1163 * we would trim the sg but not reset the open record frags.
1164 */
1165 tls_ctx->pending_open_record_frags = true;
1166 copied += try_to_copy;
1167 copied:
1168 if (full_record || eor) {
1169 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1170 record_type, &copied,
1171 msg->msg_flags);
1172 if (ret) {
1173 if (ret == -EINPROGRESS)
1174 num_async++;
1175 else if (ret == -ENOMEM)
1176 goto wait_for_memory;
1177 else if (ret != -EAGAIN) {
1178 if (ret == -ENOSPC)
1179 ret = 0;
1180 goto send_end;
1181 }
1182 }
1183 }
1184
1185 continue;
1186
1187 wait_for_sndbuf:
1188 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1189 wait_for_memory:
1190 ret = sk_stream_wait_memory(sk, &timeo);
1191 if (ret) {
1192 trim_sgl:
1193 if (ctx->open_rec)
1194 tls_trim_both_msgs(sk, orig_size);
1195 goto send_end;
1196 }
1197
1198 if (ctx->open_rec && msg_en->sg.size < required_size)
1199 goto alloc_encrypted;
1200 }
1201
1202 if (!num_async) {
1203 goto send_end;
1204 } else if (num_zc || eor) {
1205 int err;
1206
1207 /* Wait for pending encryptions to get completed */
1208 err = tls_encrypt_async_wait(ctx);
1209 if (err) {
1210 ret = err;
1211 copied = 0;
1212 }
1213 }
1214
1215 /* Transmit if any encryptions have completed */
1216 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1217 cancel_delayed_work(&ctx->tx_work.work);
1218 tls_tx_records(sk, msg->msg_flags);
1219 }
1220
1221 send_end:
1222 ret = sk_stream_error(sk, msg->msg_flags, ret);
1223 return copied > 0 ? copied : ret;
1224 }
1225
1226 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1227 {
1228 struct tls_context *tls_ctx = tls_get_ctx(sk);
1229 int ret;
1230
1231 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1232 MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
1233 MSG_SENDPAGE_NOPOLICY))
1234 return -EOPNOTSUPP;
1235
1236 ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
1237 if (ret)
1238 return ret;
1239 lock_sock(sk);
1240 ret = tls_sw_sendmsg_locked(sk, msg, size);
1241 release_sock(sk);
1242 mutex_unlock(&tls_ctx->tx_lock);
1243 return ret;
1244 }
1245
1246 /*
1247 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
1248 */
1249 void tls_sw_splice_eof(struct socket *sock)
1250 {
1251 struct sock *sk = sock->sk;
1252 struct tls_context *tls_ctx = tls_get_ctx(sk);
1253 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1254 struct tls_rec *rec;
1255 struct sk_msg *msg_pl;
1256 ssize_t copied = 0;
1257 bool retrying = false;
1258 int ret = 0;
1259
1260 if (!ctx->open_rec)
1261 return;
1262
1263 mutex_lock(&tls_ctx->tx_lock);
1264 lock_sock(sk);
1265
1266 retry:
1267 /* same checks as in tls_sw_push_pending_record() */
1268 rec = ctx->open_rec;
1269 if (!rec)
1270 goto unlock;
1271
1272 msg_pl = &rec->msg_plaintext;
1273 if (msg_pl->sg.size == 0)
1274 goto unlock;
1275
1276 /* Check the BPF advisor and perform transmission. */
1277 ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
1278 &copied, 0);
1279 switch (ret) {
1280 case 0:
1281 case -EAGAIN:
1282 if (retrying)
1283 goto unlock;
1284 retrying = true;
1285 goto retry;
1286 case -EINPROGRESS:
1287 break;
1288 default:
1289 goto unlock;
1290 }
1291
1292 /* Wait for pending encryptions to get completed */
1293 if (tls_encrypt_async_wait(ctx))
1294 goto unlock;
1295
1296 /* Transmit if any encryptions have completed */
1297 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1298 cancel_delayed_work(&ctx->tx_work.work);
1299 tls_tx_records(sk, 0);
1300 }
1301
1302 unlock:
1303 release_sock(sk);
1304 mutex_unlock(&tls_ctx->tx_lock);
1305 }
1306
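/* Wait until the strparser has a complete TLS record ready (or the psock
 * ingress queue has data). Returns 1 once a record has been loaded, 0 on
 * shutdown/EOF or pending psock data, and a negative error otherwise.
 */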
1307 static int
1308 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
1309 bool released)
1310 {
1311 struct tls_context *tls_ctx = tls_get_ctx(sk);
1312 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1313 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1314 int ret = 0;
1315 long timeo;
1316
1317 /* a rekey is pending, let userspace deal with it */
1318 if (unlikely(ctx->key_update_pending))
1319 return -EKEYEXPIRED;
1320
1321 timeo = sock_rcvtimeo(sk, nonblock);
1322
1323 while (!tls_strp_msg_ready(ctx)) {
1324 if (!sk_psock_queue_empty(psock))
1325 return 0;
1326
1327 if (sk->sk_err)
1328 return sock_error(sk);
1329
1330 if (ret < 0)
1331 return ret;
1332
1333 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1334 tls_strp_check_rcv(&ctx->strp);
1335 if (tls_strp_msg_ready(ctx))
1336 break;
1337 }
1338
1339 if (sk->sk_shutdown & RCV_SHUTDOWN)
1340 return 0;
1341
1342 if (sock_flag(sk, SOCK_DONE))
1343 return 0;
1344
1345 if (!timeo)
1346 return -EAGAIN;
1347
1348 released = true;
1349 add_wait_queue(sk_sleep(sk), &wait);
1350 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1351 ret = sk_wait_event(sk, &timeo,
1352 tls_strp_msg_ready(ctx) ||
1353 !sk_psock_queue_empty(psock),
1354 &wait);
1355 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1356 remove_wait_queue(sk_sleep(sk), &wait);
1357
1358 /* Handle signals */
1359 if (signal_pending(current))
1360 return sock_intr_errno(timeo);
1361 }
1362
1363 tls_strp_msg_load(&ctx->strp, released);
1364
1365 return 1;
1366 }
1367
1368 static int tls_setup_from_iter(struct iov_iter *from,
1369 int length, int *pages_used,
1370 struct scatterlist *to,
1371 int to_max_pages)
1372 {
1373 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1374 struct page *pages[MAX_SKB_FRAGS];
1375 unsigned int size = 0;
1376 ssize_t copied, use;
1377 size_t offset;
1378
1379 while (length > 0) {
1380 i = 0;
1381 maxpages = to_max_pages - num_elem;
1382 if (maxpages == 0) {
1383 rc = -EFAULT;
1384 goto out;
1385 }
1386 copied = iov_iter_get_pages2(from, pages,
1387 length,
1388 maxpages, &offset);
1389 if (copied <= 0) {
1390 rc = -EFAULT;
1391 goto out;
1392 }
1393
1394 length -= copied;
1395 size += copied;
1396 while (copied) {
1397 use = min_t(int, copied, PAGE_SIZE - offset);
1398
1399 sg_set_page(&to[num_elem],
1400 pages[i], use, offset);
1401 sg_unmark_end(&to[num_elem]);
1402 /* We do not uncharge memory from this API */
1403
1404 offset = 0;
1405 copied -= use;
1406
1407 i++;
1408 num_elem++;
1409 }
1410 }
1411 /* Mark the end in the last sg entry if newly added */
1412 if (num_elem > *pages_used)
1413 sg_mark_end(&to[num_elem - 1]);
1414 out:
1415 if (rc)
1416 iov_iter_revert(from, size);
1417 *pages_used = num_elem;
1418
1419 return rc;
1420 }
1421
1422 static struct sk_buff *
1423 tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
1424 unsigned int full_len)
1425 {
1426 struct strp_msg *clr_rxm;
1427 struct sk_buff *clr_skb;
1428 int err;
1429
1430 clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
1431 &err, sk->sk_allocation);
1432 if (!clr_skb)
1433 return NULL;
1434
1435 skb_copy_header(clr_skb, skb);
1436 clr_skb->len = full_len;
1437 clr_skb->data_len = full_len;
1438
1439 clr_rxm = strp_msg(clr_skb);
1440 clr_rxm->offset = 0;
1441
1442 return clr_skb;
1443 }
1444
1445 /* Decrypt handlers
1446 *
1447 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
1448 * They must transform the darg in/out argument as follows:
1449 * | Input | Output
1450 * -------------------------------------------------------------------
1451 * zc | Zero-copy decrypt allowed | Zero-copy performed
1452 * async | Async decrypt allowed | Async crypto used / in progress
1453 * skb | * | Output skb
1454 *
1455 * If ZC decryption was performed darg.skb will point to the input skb.
1456 */
1457
1458 /* This function decrypts the input skb into either out_iov, out_sg, or
1459 * the skb's own buffers. The input parameter 'darg->zc' indicates whether
1460 * zero-copy mode should be tried. With zero-copy mode, either out_iov or
1461 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
1462 * decryption happens inside the skb buffers themselves, i.e. zero-copy
1463 * gets disabled and 'darg->zc' is updated.
1464 */
1465 static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
1466 struct scatterlist *out_sg,
1467 struct tls_decrypt_arg *darg)
1468 {
1469 struct tls_context *tls_ctx = tls_get_ctx(sk);
1470 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1471 struct tls_prot_info *prot = &tls_ctx->prot_info;
1472 int n_sgin, n_sgout, aead_size, err, pages = 0;
1473 struct sk_buff *skb = tls_strp_msg(ctx);
1474 const struct strp_msg *rxm = strp_msg(skb);
1475 const struct tls_msg *tlm = tls_msg(skb);
1476 struct aead_request *aead_req;
1477 struct scatterlist *sgin = NULL;
1478 struct scatterlist *sgout = NULL;
1479 const int data_len = rxm->full_len - prot->overhead_size;
1480 int tail_pages = !!prot->tail_size;
1481 struct tls_decrypt_ctx *dctx;
1482 struct sk_buff *clear_skb;
1483 int iv_offset = 0;
1484 u8 *mem;
1485
1486 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1487 rxm->full_len - prot->prepend_size);
1488 if (n_sgin < 1)
1489 return n_sgin ?: -EBADMSG;
1490
1491 if (darg->zc && (out_iov || out_sg)) {
1492 clear_skb = NULL;
1493
1494 if (out_iov)
1495 n_sgout = 1 + tail_pages +
1496 iov_iter_npages_cap(out_iov, INT_MAX, data_len);
1497 else
1498 n_sgout = sg_nents(out_sg);
1499 } else {
1500 darg->zc = false;
1501
1502 clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
1503 if (!clear_skb)
1504 return -ENOMEM;
1505
1506 n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
1507 }
1508
1509 /* Increment to accommodate AAD */
1510 n_sgin = n_sgin + 1;
1511
1512 /* Allocate a single block of memory which contains
1513 * aead_req || tls_decrypt_ctx.
1514 * Both structs are variable length.
1515 */
1516 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1517 aead_size = ALIGN(aead_size, __alignof__(*dctx));
1518 mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
1519 sk->sk_allocation);
1520 if (!mem) {
1521 err = -ENOMEM;
1522 goto exit_free_skb;
1523 }
1524
1525 /* Segment the allocated memory */
1526 aead_req = (struct aead_request *)mem;
1527 dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
1528 dctx->sk = sk;
1529 sgin = &dctx->sg[0];
1530 sgout = &dctx->sg[n_sgin];
1531
1532 /* For CCM based ciphers, first byte of nonce+iv is a constant */
1533 switch (prot->cipher_type) {
1534 case TLS_CIPHER_AES_CCM_128:
1535 dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1536 iv_offset = 1;
1537 break;
1538 case TLS_CIPHER_SM4_CCM:
1539 dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1540 iv_offset = 1;
1541 break;
1542 }
1543
1544 /* Prepare IV */
1545 if (prot->version == TLS_1_3_VERSION ||
1546 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
1547 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
1548 prot->iv_size + prot->salt_size);
1549 } else {
1550 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1551 &dctx->iv[iv_offset] + prot->salt_size,
1552 prot->iv_size);
1553 if (err < 0)
1554 goto exit_free;
1555 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
1556 }
1557 tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
1558
1559 /* Prepare AAD */
1560 tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
1561 prot->tail_size,
1562 tls_ctx->rx.rec_seq, tlm->control, prot);
1563
1564 /* Prepare sgin */
1565 sg_init_table(sgin, n_sgin);
1566 sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
1567 err = skb_to_sgvec(skb, &sgin[1],
1568 rxm->offset + prot->prepend_size,
1569 rxm->full_len - prot->prepend_size);
1570 if (err < 0)
1571 goto exit_free;
1572
1573 if (clear_skb) {
1574 sg_init_table(sgout, n_sgout);
1575 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1576
1577 err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
1578 data_len + prot->tail_size);
1579 if (err < 0)
1580 goto exit_free;
1581 } else if (out_iov) {
1582 sg_init_table(sgout, n_sgout);
1583 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1584
1585 err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
1586 (n_sgout - 1 - tail_pages));
1587 if (err < 0)
1588 goto exit_free_pages;
1589
1590 if (prot->tail_size) {
1591 sg_unmark_end(&sgout[pages]);
1592 sg_set_buf(&sgout[pages + 1], &dctx->tail,
1593 prot->tail_size);
1594 sg_mark_end(&sgout[pages + 1]);
1595 }
1596 } else if (out_sg) {
1597 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1598 }
1599 dctx->free_sgout = !!pages;
1600
1601 /* Prepare and submit AEAD request */
1602 err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
1603 data_len + prot->tail_size, aead_req, darg);
1604 if (err) {
1605 if (darg->async_done)
1606 goto exit_free_skb;
1607 goto exit_free_pages;
1608 }
1609
1610 darg->skb = clear_skb ?: tls_strp_msg(ctx);
1611 clear_skb = NULL;
1612
1613 if (unlikely(darg->async)) {
1614 err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
1615 if (err)
1616 __skb_queue_tail(&ctx->async_hold, darg->skb);
1617 return err;
1618 }
1619
1620 if (unlikely(darg->async_done))
1621 return 0;
1622
1623 if (prot->tail_size)
1624 darg->tail = dctx->tail;
1625
1626 exit_free_pages:
1627 /* Release the pages in case iov was mapped to pages */
1628 for (; pages > 0; pages--)
1629 put_page(sg_page(&sgout[pages]));
1630 exit_free:
1631 kfree(mem);
1632 exit_free_skb:
1633 consume_skb(clear_skb);
1634 return err;
1635 }
1636
1637 static int
1638 tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
1639 struct msghdr *msg, struct tls_decrypt_arg *darg)
1640 {
1641 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1642 struct tls_prot_info *prot = &tls_ctx->prot_info;
1643 struct strp_msg *rxm;
1644 int pad, err;
1645
1646 err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
1647 if (err < 0) {
1648 if (err == -EBADMSG)
1649 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
1650 return err;
1651 }
1652 /* keep going even for ->async, the code below is TLS 1.3 */
1653
1654 /* If opportunistic TLS 1.3 ZC failed retry without ZC */
1655 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
1656 darg->tail != TLS_RECORD_TYPE_DATA)) {
1657 darg->zc = false;
1658 if (!darg->tail)
1659 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
1660 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
1661 return tls_decrypt_sw(sk, tls_ctx, msg, darg);
1662 }
1663
1664 pad = tls_padding_length(prot, darg->skb, darg);
1665 if (pad < 0) {
1666 if (darg->skb != tls_strp_msg(ctx))
1667 consume_skb(darg->skb);
1668 return pad;
1669 }
1670
1671 rxm = strp_msg(darg->skb);
1672 rxm->full_len -= pad;
1673
1674 return 0;
1675 }
1676
1677 static int
1678 tls_decrypt_device(struct sock *sk, struct msghdr *msg,
1679 struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
1680 {
1681 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1682 struct tls_prot_info *prot = &tls_ctx->prot_info;
1683 struct strp_msg *rxm;
1684 int pad, err;
1685
1686 if (tls_ctx->rx_conf != TLS_HW)
1687 return 0;
1688
1689 err = tls_device_decrypted(sk, tls_ctx);
1690 if (err <= 0)
1691 return err;
1692
1693 pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
1694 if (pad < 0)
1695 return pad;
1696
1697 darg->async = false;
1698 darg->skb = tls_strp_msg(ctx);
1699 /* ->zc downgrade check, in case TLS 1.3 gets here */
1700 darg->zc &= !(prot->version == TLS_1_3_VERSION &&
1701 tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);
1702
1703 rxm = strp_msg(darg->skb);
1704 rxm->full_len -= pad;
1705
1706 if (!darg->zc) {
1707 /* Non-ZC case needs a real skb */
1708 darg->skb = tls_strp_msg_detach(ctx);
1709 if (!darg->skb)
1710 return -ENOMEM;
1711 } else {
1712 unsigned int off, len;
1713
1714 /* In ZC case nobody cares about the output skb.
1715 * Just copy the data here. Note the skb is not fully trimmed.
1716 */
1717 off = rxm->offset + prot->prepend_size;
1718 len = rxm->full_len - prot->overhead_size;
1719
1720 err = skb_copy_datagram_msg(darg->skb, off, msg, len);
1721 if (err)
1722 return err;
1723 }
1724 return 1;
1725 }
1726
1727 static int tls_check_pending_rekey(struct sock *sk, struct tls_context *ctx,
1728 struct sk_buff *skb)
1729 {
1730 const struct strp_msg *rxm = strp_msg(skb);
1731 const struct tls_msg *tlm = tls_msg(skb);
1732 char hs_type;
1733 int err;
1734
1735 if (likely(tlm->control != TLS_RECORD_TYPE_HANDSHAKE))
1736 return 0;
1737
1738 if (rxm->full_len < 1)
1739 return 0;
1740
1741 err = skb_copy_bits(skb, rxm->offset, &hs_type, 1);
1742 if (err < 0) {
1743 DEBUG_NET_WARN_ON_ONCE(1);
1744 return err;
1745 }
1746
1747 if (hs_type == TLS_HANDSHAKE_KEYUPDATE) {
1748 struct tls_sw_context_rx *rx_ctx = ctx->priv_ctx_rx;
1749
1750 WRITE_ONCE(rx_ctx->key_update_pending, true);
1751 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYRECEIVED);
1752 }
1753
1754 return 0;
1755 }
1756
1757 static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
1758 struct tls_decrypt_arg *darg)
1759 {
1760 struct tls_context *tls_ctx = tls_get_ctx(sk);
1761 struct tls_prot_info *prot = &tls_ctx->prot_info;
1762 struct strp_msg *rxm;
1763 int err;
1764
1765 err = tls_decrypt_device(sk, msg, tls_ctx, darg);
1766 if (!err)
1767 err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
1768 if (err < 0)
1769 return err;
1770
1771 rxm = strp_msg(darg->skb);
1772 rxm->offset += prot->prepend_size;
1773 rxm->full_len -= prot->overhead_size;
1774 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1775
1776 return tls_check_pending_rekey(sk, tls_ctx, darg->skb);
1777 }
1778
1779 int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
1780 {
1781 struct tls_decrypt_arg darg = { .zc = true, };
1782
1783 return tls_decrypt_sg(sk, NULL, sgout, &darg);
1784 }
1785
1786 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
1787 u8 *control)
1788 {
1789 int err;
1790
1791 if (!*control) {
1792 *control = tlm->control;
1793 if (!*control)
1794 return -EBADMSG;
1795
1796 err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1797 sizeof(*control), control);
1798 if (*control != TLS_RECORD_TYPE_DATA) {
1799 if (err || msg->msg_flags & MSG_CTRUNC)
1800 return -EIO;
1801 }
1802 } else if (*control != tlm->control) {
1803 return 0;
1804 }
1805
1806 return 1;
1807 }
1808
1809 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
1810 {
1811 tls_strp_msg_done(&ctx->strp);
1812 }
1813
1814 /* This function traverses the rx_list in the tls receive context and copies
1815 * the decrypted records into the buffer provided by the caller when zero-copy
1816 * is not used. Further, records are removed from the rx_list when this is not
1817 * a peek and the record has been consumed completely.
1818 */
1819 static int process_rx_list(struct tls_sw_context_rx *ctx,
1820 struct msghdr *msg,
1821 u8 *control,
1822 size_t skip,
1823 size_t len,
1824 bool is_peek,
1825 bool *more)
1826 {
1827 struct sk_buff *skb = skb_peek(&ctx->rx_list);
1828 struct tls_msg *tlm;
1829 ssize_t copied = 0;
1830 int err;
1831
1832 while (skip && skb) {
1833 struct strp_msg *rxm = strp_msg(skb);
1834 tlm = tls_msg(skb);
1835
1836 err = tls_record_content_type(msg, tlm, control);
1837 if (err <= 0)
1838 goto more;
1839
1840 if (skip < rxm->full_len)
1841 break;
1842
1843 skip = skip - rxm->full_len;
1844 skb = skb_peek_next(skb, &ctx->rx_list);
1845 }
1846
1847 while (len && skb) {
1848 struct sk_buff *next_skb;
1849 struct strp_msg *rxm = strp_msg(skb);
1850 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1851
1852 tlm = tls_msg(skb);
1853
1854 err = tls_record_content_type(msg, tlm, control);
1855 if (err <= 0)
1856 goto more;
1857
1858 err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1859 msg, chunk);
1860 if (err < 0)
1861 goto more;
1862
1863 len = len - chunk;
1864 copied = copied + chunk;
1865
1866 /* Consume the data from the record in the non-peek case */
1867 if (!is_peek) {
1868 rxm->offset = rxm->offset + chunk;
1869 rxm->full_len = rxm->full_len - chunk;
1870
1871 /* Return if there is unconsumed data in the record */
1872 if (rxm->full_len - skip)
1873 break;
1874 }
1875
1876 /* The remaining skip-bytes must lie in 1st record in rx_list.
1877 * So from the 2nd record, 'skip' should be 0.
1878 */
1879 skip = 0;
1880
1881 if (msg)
1882 msg->msg_flags |= MSG_EOR;
1883
1884 next_skb = skb_peek_next(skb, &ctx->rx_list);
1885
1886 if (!is_peek) {
1887 __skb_unlink(skb, &ctx->rx_list);
1888 consume_skb(skb);
1889 }
1890
1891 skb = next_skb;
1892 }
1893 err = 0;
1894
1895 out:
1896 return copied ? : err;
1897 more:
1898 if (more)
1899 *more = true;
1900 goto out;
1901 }
1902
1903 static bool
1904 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1905 size_t len_left, size_t decrypted, ssize_t done,
1906 size_t *flushed_at)
1907 {
1908 size_t max_rec;
1909
1910 if (len_left <= decrypted)
1911 return false;
1912
1913 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1914 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
1915 return false;
1916
1917 *flushed_at = done;
1918 return sk_flush_backlog(sk);
1919 }
1920
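/* The receive path allows only one reader of the strparser state at a
 * time. Acquire waits, subject to the socket receive timeout, until the
 * previous reader is gone and then marks this reader as present.
 */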
1921 static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
1922 bool nonblock)
1923 {
1924 long timeo;
1925 int ret;
1926
1927 timeo = sock_rcvtimeo(sk, nonblock);
1928
1929 while (unlikely(ctx->reader_present)) {
1930 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1931
1932 ctx->reader_contended = 1;
1933
1934 add_wait_queue(&ctx->wq, &wait);
1935 ret = sk_wait_event(sk, &timeo,
1936 !READ_ONCE(ctx->reader_present), &wait);
1937 remove_wait_queue(&ctx->wq, &wait);
1938
1939 if (timeo <= 0)
1940 return -EAGAIN;
1941 if (signal_pending(current))
1942 return sock_intr_errno(timeo);
1943 if (ret < 0)
1944 return ret;
1945 }
1946
1947 WRITE_ONCE(ctx->reader_present, 1);
1948
1949 return 0;
1950 }
1951
1952 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
1953 bool nonblock)
1954 {
1955 int err;
1956
1957 lock_sock(sk);
1958 err = tls_rx_reader_acquire(sk, ctx, nonblock);
1959 if (err)
1960 release_sock(sk);
1961 return err;
1962 }
1963
1964 static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
1965 {
1966 if (unlikely(ctx->reader_contended)) {
1967 if (wq_has_sleeper(&ctx->wq))
1968 wake_up(&ctx->wq);
1969 else
1970 ctx->reader_contended = 0;
1971
1972 WARN_ON_ONCE(!ctx->reader_present);
1973 }
1974
1975 WRITE_ONCE(ctx->reader_present, 0);
1976 }
1977
1978 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
1979 {
1980 tls_rx_reader_release(sk, ctx);
1981 release_sock(sk);
1982 }
1983
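/* Software-TLS recvmsg(): drain records already decrypted onto
 * rx_list first, then parse and decrypt records from the TCP socket
 * until 'len' bytes or the rcvlowat target have been copied. A
 * control (non-data) record terminates the copy so its record type
 * can be delivered through a cmsg.
 *
 * Userspace sketch (illustrative only; handle_record_type() is a
 * hypothetical helper) of how the record type surfaces via cmsg:
 *
 *	char buf[16384];
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(fd, &m, 0);
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&m);
 *	if (n >= 0 && cm && cm->cmsg_level == SOL_TLS &&
 *	    cm->cmsg_type == TLS_GET_RECORD_TYPE)
 *		handle_record_type(*CMSG_DATA(cm), buf, n);
 */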
1984 int tls_sw_recvmsg(struct sock *sk,
1985 struct msghdr *msg,
1986 size_t len,
1987 int flags,
1988 int *addr_len)
1989 {
1990 struct tls_context *tls_ctx = tls_get_ctx(sk);
1991 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1992 struct tls_prot_info *prot = &tls_ctx->prot_info;
1993 ssize_t decrypted = 0, async_copy_bytes = 0;
1994 struct sk_psock *psock;
1995 unsigned char control = 0;
1996 size_t flushed_at = 0;
1997 struct strp_msg *rxm;
1998 struct tls_msg *tlm;
1999 ssize_t copied = 0;
2000 ssize_t peeked = 0;
2001 bool async = false;
2002 int target, err;
2003 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
2004 bool is_peek = flags & MSG_PEEK;
2005 bool rx_more = false;
2006 bool released = true;
2007 bool bpf_strp_enabled;
2008 bool zc_capable;
2009
2010 if (unlikely(flags & MSG_ERRQUEUE))
2011 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
2012
2013 err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
2014 if (err < 0)
2015 return err;
2016 psock = sk_psock_get(sk);
2017 bpf_strp_enabled = sk_psock_strp_enabled(psock);
2018
2019 /* If crypto failed the connection is broken */
2020 err = ctx->async_wait.err;
2021 if (err)
2022 goto end;
2023
2024 /* Process pending decrypted records; these cannot have been zero-copied */
2025 err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
2026 if (err < 0)
2027 goto end;
2028
2029 copied = err;
2030 if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
2031 goto end;
2032
2033 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2034 len = len - copied;
2035
2036 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
2037 ctx->zc_capable;
2038 decrypted = 0;
2039 while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
2040 struct tls_decrypt_arg darg;
2041 int to_decrypt, chunk;
2042
2043 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
2044 released);
2045 if (err <= 0) {
2046 if (psock) {
2047 chunk = sk_msg_recvmsg(sk, psock, msg, len,
2048 flags);
2049 if (chunk > 0) {
2050 decrypted += chunk;
2051 len -= chunk;
2052 continue;
2053 }
2054 }
2055 goto recv_end;
2056 }
2057
2058 memset(&darg.inargs, 0, sizeof(darg.inargs));
2059
2060 rxm = strp_msg(tls_strp_msg(ctx));
2061 tlm = tls_msg(tls_strp_msg(ctx));
2062
2063 to_decrypt = rxm->full_len - prot->overhead_size;
2064
2065 if (zc_capable && to_decrypt <= len &&
2066 tlm->control == TLS_RECORD_TYPE_DATA)
2067 darg.zc = true;
2068
2069 /* Do not use async mode if record is non-data */
2070 if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
2071 darg.async = ctx->async_capable;
2072 else
2073 darg.async = false;
2074
2075 err = tls_rx_one_record(sk, msg, &darg);
2076 if (err < 0) {
2077 tls_err_abort(sk, -EBADMSG);
2078 goto recv_end;
2079 }
2080
2081 async |= darg.async;
2082
2083 /* If the type of records being processed is not known yet,
2084 * set it to the record type just dequeued. If it is already
2085 * known but does not match the record type just dequeued, stop.
2086 * We always get the record type here since for TLS 1.2 it is
2087 * known as soon as the record is dequeued from the stream
2088 * parser. For TLS 1.3, we disable async.
2089 */
2090 err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2091 if (err <= 0) {
2092 DEBUG_NET_WARN_ON_ONCE(darg.zc);
2093 tls_rx_rec_done(ctx);
2094 put_on_rx_list_err:
2095 __skb_queue_tail(&ctx->rx_list, darg.skb);
2096 goto recv_end;
2097 }
2098
2099 /* periodically flush backlog, and feed strparser */
2100 released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
2101 decrypted + copied,
2102 &flushed_at);
2103
2104 /* TLS 1.3 may have updated the length by more than overhead */
2105 rxm = strp_msg(darg.skb);
2106 chunk = rxm->full_len;
2107 tls_rx_rec_done(ctx);
2108
2109 if (!darg.zc) {
2110 bool partially_consumed = chunk > len;
2111 struct sk_buff *skb = darg.skb;
2112
2113 DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2114
2115 if (async) {
2116 /* TLS 1.2-only, to_decrypt must be text len */
2117 chunk = min_t(int, to_decrypt, len);
2118 async_copy_bytes += chunk;
2119 put_on_rx_list:
2120 decrypted += chunk;
2121 len -= chunk;
2122 __skb_queue_tail(&ctx->rx_list, skb);
2123 if (unlikely(control != TLS_RECORD_TYPE_DATA))
2124 break;
2125 continue;
2126 }
2127
2128 if (bpf_strp_enabled) {
2129 released = true;
2130 err = sk_psock_tls_strp_read(psock, skb);
2131 if (err != __SK_PASS) {
2132 rxm->offset = rxm->offset + rxm->full_len;
2133 rxm->full_len = 0;
2134 if (err == __SK_DROP)
2135 consume_skb(skb);
2136 continue;
2137 }
2138 }
2139
2140 if (partially_consumed)
2141 chunk = len;
2142
2143 err = skb_copy_datagram_msg(skb, rxm->offset,
2144 msg, chunk);
2145 if (err < 0)
2146 goto put_on_rx_list_err;
2147
2148 if (is_peek) {
2149 peeked += chunk;
2150 goto put_on_rx_list;
2151 }
2152
2153 if (partially_consumed) {
2154 rxm->offset += chunk;
2155 rxm->full_len -= chunk;
2156 goto put_on_rx_list;
2157 }
2158
2159 consume_skb(skb);
2160 }
2161
2162 decrypted += chunk;
2163 len -= chunk;
2164
2165 /* Return full control message to userspace before trying
2166 * to parse another message type
2167 */
2168 msg->msg_flags |= MSG_EOR;
2169 if (control != TLS_RECORD_TYPE_DATA)
2170 break;
2171 }
2172
2173 recv_end:
2174 if (async) {
2175 int ret;
2176
2177 /* Wait for all previously submitted records to be decrypted */
2178 ret = tls_decrypt_async_wait(ctx);
2179 __skb_queue_purge(&ctx->async_hold);
2180
2181 if (ret) {
2182 if (err >= 0 || err == -EINPROGRESS)
2183 err = ret;
2184 goto end;
2185 }
2186
2187 /* Drain records from the rx_list & copy if required */
2188 if (is_peek)
2189 err = process_rx_list(ctx, msg, &control, copied + peeked,
2190 decrypted - peeked, is_peek, NULL);
2191 else
2192 err = process_rx_list(ctx, msg, &control, 0,
2193 async_copy_bytes, is_peek, NULL);
2194
2195 /* we could have copied less than we wanted, and possibly nothing */
2196 decrypted += max(err, 0) - async_copy_bytes;
2197 }
2198
2199 copied += decrypted;
2200
2201 end:
2202 tls_rx_reader_unlock(sk, ctx);
2203 if (psock)
2204 sk_psock_put(sk, psock);
2205 return copied ? : err;
2206 }
2207
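/* splice() path: reuse an already-decrypted record from rx_list if one
 * is queued, otherwise wait for and decrypt the next record. Only data
 * records can be spliced; a partially consumed or rejected record is
 * put back at the head of rx_list.
 */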
2208 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
2209 struct pipe_inode_info *pipe,
2210 size_t len, unsigned int flags)
2211 {
2212 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2213 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2214 struct strp_msg *rxm = NULL;
2215 struct sock *sk = sock->sk;
2216 struct tls_msg *tlm;
2217 struct sk_buff *skb;
2218 ssize_t copied = 0;
2219 int chunk;
2220 int err;
2221
2222 err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
2223 if (err < 0)
2224 return err;
2225
2226 if (!skb_queue_empty(&ctx->rx_list)) {
2227 skb = __skb_dequeue(&ctx->rx_list);
2228 } else {
2229 struct tls_decrypt_arg darg;
2230
2231 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
2232 true);
2233 if (err <= 0)
2234 goto splice_read_end;
2235
2236 memset(&darg.inargs, 0, sizeof(darg.inargs));
2237
2238 err = tls_rx_one_record(sk, NULL, &darg);
2239 if (err < 0) {
2240 tls_err_abort(sk, -EBADMSG);
2241 goto splice_read_end;
2242 }
2243
2244 tls_rx_rec_done(ctx);
2245 skb = darg.skb;
2246 }
2247
2248 rxm = strp_msg(skb);
2249 tlm = tls_msg(skb);
2250
2251 /* splice does not support reading control messages */
2252 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2253 err = -EINVAL;
2254 goto splice_requeue;
2255 }
2256
2257 chunk = min_t(unsigned int, rxm->full_len, len);
2258 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2259 if (copied < 0)
2260 goto splice_requeue;
2261
2262 if (chunk < rxm->full_len) {
2263 rxm->offset += len;
2264 rxm->full_len -= len;
2265 goto splice_requeue;
2266 }
2267
2268 consume_skb(skb);
2269
2270 splice_read_end:
2271 tls_rx_reader_unlock(sk, ctx);
2272 return copied ? : err;
2273
2274 splice_requeue:
2275 __skb_queue_head(&ctx->rx_list, skb);
2276 goto splice_read_end;
2277 }
2278
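/* read_sock() path for in-kernel consumers: decrypted data records are
 * handed to 'read_actor' without copying to userspace. It is not
 * supported while a BPF psock is attached and, like splice, it refuses
 * control records.
 */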
2279 int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
2280 sk_read_actor_t read_actor)
2281 {
2282 struct tls_context *tls_ctx = tls_get_ctx(sk);
2283 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2284 struct tls_prot_info *prot = &tls_ctx->prot_info;
2285 struct strp_msg *rxm = NULL;
2286 struct sk_buff *skb = NULL;
2287 struct sk_psock *psock;
2288 size_t flushed_at = 0;
2289 bool released = true;
2290 struct tls_msg *tlm;
2291 ssize_t copied = 0;
2292 ssize_t decrypted;
2293 int err, used;
2294
2295 psock = sk_psock_get(sk);
2296 if (psock) {
2297 sk_psock_put(sk, psock);
2298 return -EINVAL;
2299 }
2300 err = tls_rx_reader_acquire(sk, ctx, true);
2301 if (err < 0)
2302 return err;
2303
2304 /* If crypto failed the connection is broken */
2305 err = ctx->async_wait.err;
2306 if (err)
2307 goto read_sock_end;
2308
2309 decrypted = 0;
2310 do {
2311 if (!skb_queue_empty(&ctx->rx_list)) {
2312 skb = __skb_dequeue(&ctx->rx_list);
2313 rxm = strp_msg(skb);
2314 tlm = tls_msg(skb);
2315 } else {
2316 struct tls_decrypt_arg darg;
2317
2318 err = tls_rx_rec_wait(sk, NULL, true, released);
2319 if (err <= 0)
2320 goto read_sock_end;
2321
2322 memset(&darg.inargs, 0, sizeof(darg.inargs));
2323
2324 err = tls_rx_one_record(sk, NULL, &darg);
2325 if (err < 0) {
2326 tls_err_abort(sk, -EBADMSG);
2327 goto read_sock_end;
2328 }
2329
2330 released = tls_read_flush_backlog(sk, prot, INT_MAX,
2331 0, decrypted,
2332 &flushed_at);
2333 skb = darg.skb;
2334 rxm = strp_msg(skb);
2335 tlm = tls_msg(skb);
2336 decrypted += rxm->full_len;
2337
2338 tls_rx_rec_done(ctx);
2339 }
2340
2341 /* read_sock does not support reading control messages */
2342 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2343 err = -EINVAL;
2344 goto read_sock_requeue;
2345 }
2346
2347 used = read_actor(desc, skb, rxm->offset, rxm->full_len);
2348 if (used <= 0) {
2349 if (!copied)
2350 err = used;
2351 goto read_sock_requeue;
2352 }
2353 copied += used;
2354 if (used < rxm->full_len) {
2355 rxm->offset += used;
2356 rxm->full_len -= used;
2357 if (!desc->count)
2358 goto read_sock_requeue;
2359 } else {
2360 consume_skb(skb);
2361 if (!desc->count)
2362 skb = NULL;
2363 }
2364 } while (skb);
2365
2366 read_sock_end:
2367 tls_rx_reader_release(sk, ctx);
2368 return copied ? : err;
2369
2370 read_sock_requeue:
2371 __skb_queue_head(&ctx->rx_list, skb);
2372 goto read_sock_end;
2373 }
2374
2375 bool tls_sw_sock_is_readable(struct sock *sk)
2376 {
2377 struct tls_context *tls_ctx = tls_get_ctx(sk);
2378 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2379 bool ingress_empty = true;
2380 struct sk_psock *psock;
2381
2382 rcu_read_lock();
2383 psock = sk_psock(sk);
2384 if (psock)
2385 ingress_empty = list_empty(&psock->ingress_msg);
2386 rcu_read_unlock();
2387
2388 return !ingress_empty || tls_strp_msg_ready(ctx) ||
2389 !skb_queue_empty(&ctx->rx_list);
2390 }
2391
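/* A TLS record starts with a 5 byte header:
 *	byte 0		ContentType
 *	bytes 1-2	legacy ProtocolVersion (0x03 0x03)
 *	bytes 3-4	ciphertext length, big endian
 * tls_rx_msg_size() reads this header and tells the strparser how long
 * the complete record is, returns 0 to wait for more data, or a
 * negative error to abort the connection.
 */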
2392 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
2393 {
2394 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2395 struct tls_prot_info *prot = &tls_ctx->prot_info;
2396 char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
2397 size_t cipher_overhead;
2398 size_t data_len = 0;
2399 int ret;
2400
2401 /* Verify that we have a full TLS header, or wait for more data */
2402 if (strp->stm.offset + prot->prepend_size > skb->len)
2403 return 0;
2404
2405 /* Sanity-check size of on-stack buffer. */
2406 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2407 ret = -EINVAL;
2408 goto read_failure;
2409 }
2410
2411 /* Linearize header to local buffer */
2412 ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
2413 if (ret < 0)
2414 goto read_failure;
2415
2416 strp->mark = header[0];
2417
2418 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2419
2420 cipher_overhead = prot->tag_size;
2421 if (prot->version != TLS_1_3_VERSION &&
2422 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2423 cipher_overhead += prot->iv_size;
2424
2425 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2426 prot->tail_size) {
2427 ret = -EMSGSIZE;
2428 goto read_failure;
2429 }
2430 if (data_len < cipher_overhead) {
2431 ret = -EBADMSG;
2432 goto read_failure;
2433 }
2434
2435 /* Note that both TLS 1.3 and TLS 1.2 use the TLS_1_2 version number here */
2436 if (header[1] != TLS_1_2_VERSION_MINOR ||
2437 header[2] != TLS_1_2_VERSION_MAJOR) {
2438 ret = -EINVAL;
2439 goto read_failure;
2440 }
2441
2442 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2443 TCP_SKB_CB(skb)->seq + strp->stm.offset);
2444 return data_len + TLS_HEADER_SIZE;
2445
2446 read_failure:
2447 tls_err_abort(strp->sk, ret);
2448
2449 return ret;
2450 }
2451
2452 void tls_rx_msg_ready(struct tls_strparser *strp)
2453 {
2454 struct tls_sw_context_rx *ctx;
2455
2456 ctx = container_of(strp, struct tls_sw_context_rx, strp);
2457 ctx->saved_data_ready(strp->sk);
2458 }
2459
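/* sk_data_ready hook installed by tls_sw_strparser_arm(). It can run
 * from softirq context, so allocations around the strparser are forced
 * to GFP_ATOMIC, and the wake-up is also propagated when a BPF psock
 * has queued ingress messages.
 */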
2460 static void tls_data_ready(struct sock *sk)
2461 {
2462 struct tls_context *tls_ctx = tls_get_ctx(sk);
2463 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2464 struct sk_psock *psock;
2465 gfp_t alloc_save;
2466
2467 trace_sk_data_ready(sk);
2468
2469 alloc_save = sk->sk_allocation;
2470 sk->sk_allocation = GFP_ATOMIC;
2471 tls_strp_data_ready(&ctx->strp);
2472 sk->sk_allocation = alloc_save;
2473
2474 psock = sk_psock_get(sk);
2475 if (psock) {
2476 if (!list_empty(&psock->ingress_msg))
2477 ctx->saved_data_ready(sk);
2478 sk_psock_put(sk, psock);
2479 }
2480 }
2481
2482 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2483 {
2484 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2485
2486 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2487 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2488 cancel_delayed_work_sync(&ctx->tx_work.work);
2489 }
2490
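/* TX teardown: wait for in-flight async encryptions, transmit anything
 * already encrypted, then free the partially sent record (if any) and
 * every record still sitting on tx_list before releasing the AEAD.
 */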
2491 void tls_sw_release_resources_tx(struct sock *sk)
2492 {
2493 struct tls_context *tls_ctx = tls_get_ctx(sk);
2494 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2495 struct tls_rec *rec, *tmp;
2496
2497 /* Wait for any pending async encryptions to complete */
2498 tls_encrypt_async_wait(ctx);
2499
2500 tls_tx_records(sk, -1);
2501
2502 /* Free up unsent records in tx_list. First, free
2503 * the partially sent record, if any, at the head of tx_list.
2504 */
2505 if (tls_ctx->partially_sent_record) {
2506 tls_free_partial_record(sk, tls_ctx);
2507 rec = list_first_entry(&ctx->tx_list,
2508 struct tls_rec, list);
2509 list_del(&rec->list);
2510 sk_msg_free(sk, &rec->msg_plaintext);
2511 kfree(rec);
2512 }
2513
2514 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2515 list_del(&rec->list);
2516 sk_msg_free(sk, &rec->msg_encrypted);
2517 sk_msg_free(sk, &rec->msg_plaintext);
2518 kfree(rec);
2519 }
2520
2521 crypto_free_aead(ctx->aead_send);
2522 tls_free_open_rec(sk);
2523 }
2524
2525 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2526 {
2527 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2528
2529 kfree(ctx);
2530 }
2531
2532 void tls_sw_release_resources_rx(struct sock *sk)
2533 {
2534 struct tls_context *tls_ctx = tls_get_ctx(sk);
2535 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2536
2537 if (ctx->aead_recv) {
2538 __skb_queue_purge(&ctx->rx_list);
2539 crypto_free_aead(ctx->aead_recv);
2540 tls_strp_stop(&ctx->strp);
2541 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2542 * we still want to tls_strp_stop(), but sk->sk_data_ready was
2543 * never swapped.
2544 */
2545 if (ctx->saved_data_ready) {
2546 write_lock_bh(&sk->sk_callback_lock);
2547 sk->sk_data_ready = ctx->saved_data_ready;
2548 write_unlock_bh(&sk->sk_callback_lock);
2549 }
2550 }
2551 }
2552
2553 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2554 {
2555 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2556
2557 tls_strp_done(&ctx->strp);
2558 }
2559
2560 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2561 {
2562 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2563
2564 kfree(ctx);
2565 }
2566
2567 void tls_sw_free_resources_rx(struct sock *sk)
2568 {
2569 struct tls_context *tls_ctx = tls_get_ctx(sk);
2570
2571 tls_sw_release_resources_rx(sk);
2572 tls_sw_free_ctx_rx(tls_ctx);
2573 }
2574
2575 /* The work handler to transmit the encrypted records in tx_list */
2576 static void tx_work_handler(struct work_struct *work)
2577 {
2578 struct delayed_work *delayed_work = to_delayed_work(work);
2579 struct tx_work *tx_work = container_of(delayed_work,
2580 struct tx_work, work);
2581 struct sock *sk = tx_work->sk;
2582 struct tls_context *tls_ctx = tls_get_ctx(sk);
2583 struct tls_sw_context_tx *ctx;
2584
2585 if (unlikely(!tls_ctx))
2586 return;
2587
2588 ctx = tls_sw_ctx_tx(tls_ctx);
2589 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2590 return;
2591
2592 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2593 return;
2594
2595 if (mutex_trylock(&tls_ctx->tx_lock)) {
2596 lock_sock(sk);
2597 tls_tx_records(sk, -1);
2598 release_sock(sk);
2599 mutex_unlock(&tls_ctx->tx_lock);
2600 } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
2601 /* Someone is holding the tx_lock, they will likely run Tx
2602 * and cancel the work on their way out of the lock section.
2603 * Schedule a long delay just in case.
2604 */
2605 schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
2606 }
2607 }
2608
2609 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2610 {
2611 struct tls_rec *rec;
2612
2613 rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
2614 if (!rec)
2615 return false;
2616
2617 return READ_ONCE(rec->tx_ready);
2618 }
2619
2620 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2621 {
2622 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2623
2624 /* Schedule the transmission if tx list is ready */
2625 if (tls_is_tx_ready(tx_ctx) &&
2626 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2627 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2628 }
2629
2630 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2631 {
2632 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2633
2634 write_lock_bh(&sk->sk_callback_lock);
2635 rx_ctx->saved_data_ready = sk->sk_data_ready;
2636 sk->sk_data_ready = tls_data_ready;
2637 write_unlock_bh(&sk->sk_callback_lock);
2638 }
2639
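/* Zero-copy decryption straight into user buffers requires knowing the
 * plaintext length up front. TLS 1.3 records may carry padding, so the
 * RX path is only zero-copy capable for TLS 1.2, or for TLS 1.3 when
 * the peer is known not to pad (rx_no_pad).
 */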
2640 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2641 {
2642 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2643
2644 rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2645 tls_ctx->prot_info.version != TLS_1_3_VERSION;
2646 }
2647
2648 static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
2649 {
2650 struct tls_sw_context_tx *sw_ctx_tx;
2651
2652 if (!ctx->priv_ctx_tx) {
2653 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2654 if (!sw_ctx_tx)
2655 return NULL;
2656 } else {
2657 sw_ctx_tx = ctx->priv_ctx_tx;
2658 }
2659
2660 crypto_init_wait(&sw_ctx_tx->async_wait);
2661 atomic_set(&sw_ctx_tx->encrypt_pending, 1);
2662 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2663 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2664 sw_ctx_tx->tx_work.sk = sk;
2665
2666 return sw_ctx_tx;
2667 }
2668
2669 static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
2670 {
2671 struct tls_sw_context_rx *sw_ctx_rx;
2672
2673 if (!ctx->priv_ctx_rx) {
2674 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2675 if (!sw_ctx_rx)
2676 return NULL;
2677 } else {
2678 sw_ctx_rx = ctx->priv_ctx_rx;
2679 }
2680
2681 crypto_init_wait(&sw_ctx_rx->async_wait);
2682 atomic_set(&sw_ctx_rx->decrypt_pending, 1);
2683 init_waitqueue_head(&sw_ctx_rx->wq);
2684 skb_queue_head_init(&sw_ctx_rx->rx_list);
2685 skb_queue_head_init(&sw_ctx_rx->async_hold);
2686
2687 return sw_ctx_rx;
2688 }
2689
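/* Derive the per-connection framing sizes from the cipher description.
 * Illustrative numbers for AES-GCM-128, assuming its usual 8 byte
 * explicit nonce and 16 byte tag:
 *
 *	TLS 1.2: prepend = 5 + 8 = 13, tag = 16, tail = 0 -> overhead 29
 *	TLS 1.3: prepend = 5 + 0 =  5, tag = 16, tail = 1 -> overhead 22
 *
 * TLS 1.3 carries no explicit nonce and appends a one byte inner
 * content type, hence tail_size = 1.
 */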
2690 int init_prot_info(struct tls_prot_info *prot,
2691 const struct tls_crypto_info *crypto_info,
2692 const struct tls_cipher_desc *cipher_desc)
2693 {
2694 u16 nonce_size = cipher_desc->nonce;
2695
2696 if (crypto_info->version == TLS_1_3_VERSION) {
2697 nonce_size = 0;
2698 prot->aad_size = TLS_HEADER_SIZE;
2699 prot->tail_size = 1;
2700 } else {
2701 prot->aad_size = TLS_AAD_SPACE_SIZE;
2702 prot->tail_size = 0;
2703 }
2704
2705 /* Sanity-check the sizes for stack allocations. */
2706 if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
2707 return -EINVAL;
2708
2709 prot->version = crypto_info->version;
2710 prot->cipher_type = crypto_info->cipher_type;
2711 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2712 prot->tag_size = cipher_desc->tag;
2713 prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
2714 prot->iv_size = cipher_desc->iv;
2715 prot->salt_size = cipher_desc->salt;
2716 prot->rec_seq_size = cipher_desc->rec_seq;
2717
2718 return 0;
2719 }
2720
2721 static void tls_finish_key_update(struct sock *sk, struct tls_context *tls_ctx)
2722 {
2723 struct tls_sw_context_rx *ctx = tls_ctx->priv_ctx_rx;
2724
2725 WRITE_ONCE(ctx->key_update_pending, false);
2726 /* wake up pre-existing poll() */
2727 ctx->saved_data_ready(sk);
2728 }
2729
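/* Configure software crypto for one direction (tx != 0 selects TX).
 * When new_crypto_info is NULL this is the initial setup from
 * setsockopt(SOL_TLS, TLS_TX/TLS_RX); otherwise it is a rekey, the
 * existing contexts are reused and setkey is deliberately the last
 * operation that can fail, so an aborted rekey leaves the old state
 * intact.
 *
 * Userspace sketch of the initial setup (illustrative only; the key
 * material would come from the TLS handshake):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	(fill ci.key, ci.salt, ci.iv and ci.rec_seq from the handshake)
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */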
2730 int tls_set_sw_offload(struct sock *sk, int tx,
2731 struct tls_crypto_info *new_crypto_info)
2732 {
2733 struct tls_crypto_info *crypto_info, *src_crypto_info;
2734 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2735 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2736 const struct tls_cipher_desc *cipher_desc;
2737 char *iv, *rec_seq, *key, *salt;
2738 struct cipher_context *cctx;
2739 struct tls_prot_info *prot;
2740 struct crypto_aead **aead;
2741 struct tls_context *ctx;
2742 struct crypto_tfm *tfm;
2743 int rc = 0;
2744
2745 ctx = tls_get_ctx(sk);
2746 prot = &ctx->prot_info;
2747
2748 /* new_crypto_info != NULL means rekey */
2749 if (!new_crypto_info) {
2750 if (tx) {
2751 ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
2752 if (!ctx->priv_ctx_tx)
2753 return -ENOMEM;
2754 } else {
2755 ctx->priv_ctx_rx = init_ctx_rx(ctx);
2756 if (!ctx->priv_ctx_rx)
2757 return -ENOMEM;
2758 }
2759 }
2760
2761 if (tx) {
2762 sw_ctx_tx = ctx->priv_ctx_tx;
2763 crypto_info = &ctx->crypto_send.info;
2764 cctx = &ctx->tx;
2765 aead = &sw_ctx_tx->aead_send;
2766 } else {
2767 sw_ctx_rx = ctx->priv_ctx_rx;
2768 crypto_info = &ctx->crypto_recv.info;
2769 cctx = &ctx->rx;
2770 aead = &sw_ctx_rx->aead_recv;
2771 }
2772
2773 src_crypto_info = new_crypto_info ?: crypto_info;
2774
2775 cipher_desc = get_cipher_desc(src_crypto_info->cipher_type);
2776 if (!cipher_desc) {
2777 rc = -EINVAL;
2778 goto free_priv;
2779 }
2780
2781 rc = init_prot_info(prot, src_crypto_info, cipher_desc);
2782 if (rc)
2783 goto free_priv;
2784
2785 iv = crypto_info_iv(src_crypto_info, cipher_desc);
2786 key = crypto_info_key(src_crypto_info, cipher_desc);
2787 salt = crypto_info_salt(src_crypto_info, cipher_desc);
2788 rec_seq = crypto_info_rec_seq(src_crypto_info, cipher_desc);
2789
2790 if (!*aead) {
2791 *aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
2792 if (IS_ERR(*aead)) {
2793 rc = PTR_ERR(*aead);
2794 *aead = NULL;
2795 goto free_priv;
2796 }
2797 }
2798
2799 ctx->push_pending_record = tls_sw_push_pending_record;
2800
2801 /* setkey is the last operation that could fail during a
2802 * rekey. If it succeeds, we can start modifying the
2803 * context.
2804 */
2805 rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
2806 if (rc) {
2807 if (new_crypto_info)
2808 goto out;
2809 else
2810 goto free_aead;
2811 }
2812
2813 if (!new_crypto_info) {
2814 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2815 if (rc)
2816 goto free_aead;
2817 }
2818
2819 if (!tx && !new_crypto_info) {
2820 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2821
2822 tls_update_rx_zc_capable(ctx);
2823 sw_ctx_rx->async_capable =
2824 src_crypto_info->version != TLS_1_3_VERSION &&
2825 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2826
2827 rc = tls_strp_init(&sw_ctx_rx->strp, sk);
2828 if (rc)
2829 goto free_aead;
2830 }
2831
2832 memcpy(cctx->iv, salt, cipher_desc->salt);
2833 memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
2834 memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);
2835
2836 if (new_crypto_info) {
2837 unsafe_memcpy(crypto_info, new_crypto_info,
2838 cipher_desc->crypto_info,
2839 /* size was checked in do_tls_setsockopt_conf */);
2840 memzero_explicit(new_crypto_info, cipher_desc->crypto_info);
2841 if (!tx)
2842 tls_finish_key_update(sk, ctx);
2843 }
2844
2845 goto out;
2846
2847 free_aead:
2848 crypto_free_aead(*aead);
2849 *aead = NULL;
2850 free_priv:
2851 if (!new_crypto_info) {
2852 if (tx) {
2853 kfree(ctx->priv_ctx_tx);
2854 ctx->priv_ctx_tx = NULL;
2855 } else {
2856 kfree(ctx->priv_ctx_rx);
2857 ctx->priv_ctx_rx = NULL;
2858 }
2859 }
2860 out:
2861 return rc;
2862 }
2863