/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>

#include "tls.h"

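/* Arguments passed through the decrypt handlers for a single record.
 * The input flags are wrapped in a struct_group() so that callers can
 * reset them as a unit; ->skb carries the resulting output skb (see
 * the "Decrypt handlers" comment further below).
 */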
struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	bool async_done;
	u8 tail;
	);

	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[TLS_MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	bool free_sgout;
	struct scatterlist sg[];
};

noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}

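/* Recursively count the scatterlist entries needed to map @len bytes of
 * @skb starting at @offset: the linear head first, then the page frags,
 * then any frag list. Recursion depth is capped to bound stack usage.
 */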
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

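/* TLS 1.3 hides the real content type behind trailing zero padding.
 * Scan backwards from the end of the plaintext for the first non-zero
 * byte (the content type), record it in the skb's tls_msg, and return
 * the number of padding bytes to trim.
 */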
static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			offset--;
		}
		tlm->control = content_type;
	}
	return sub;
}

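/* Completion callback for async decryption. The tls_decrypt_ctx is
 * recovered from the aead_req using the same aead_req || tls_decrypt_ctx
 * layout that tls_decrypt_sg() allocated.
 */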
static void tls_decrypt_done(void *data, int err)
{
	struct aead_request *aead_req = data;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	/* If requests get too backlogged crypto API returns -EBUSY and calls
	 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
	 * to make waiting for backlog to flush with crypto_wait_req() easier.
	 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
	 * -EINPROGRESS -> 0.
	 * We have a single struct crypto_async_request per direction, this
	 * scheme doesn't help us, so just ignore the first ->complete().
	 */
	if (err == -EINPROGRESS)
		return;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate the error if there was one */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if the skb was not decrypted in place */
	if (dctx->free_sgout) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
}

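/* decrypt_pending is kept biased by one so it can only reach zero here:
 * drop the bias to check whether async completions are still in flight,
 * wait for them if so, then restore the bias for the next batch.
 */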
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);

	return ctx->async_wait.err;
}

static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		DECLARE_CRYPTO_WAIT(wait);

		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		ret = crypto_aead_decrypt(aead_req);
		if (ret == -EINPROGRESS || ret == -EBUSY)
			ret = crypto_wait_req(ret, &wait);
		return ret;
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS)
		return 0;

	if (ret == -EBUSY) {
		ret = tls_decrypt_async_wait(ctx);
		darg->async_done = true;
		/* all completions have run, we're not doing async anymore */
		darg->async = false;
		return ret;
	}

	atomic_dec(&ctx->decrypt_pending);
	darg->async = false;

	return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from the encrypted sg
	 * at the end of the plaintext sg. The caller guarantees that
	 * msg_en has the required room.
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	rec->sk = sk;

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

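/* Transmit records in order: finish any partially sent record first,
 * then push records from the head of tx_list for as long as their
 * encryption has completed (tx_ready). Stop at the first record that
 * is still being encrypted, to preserve record sequence.
 */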
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, rc);

	return rc;
}

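/* Completion callback for async encryption: restore the sg entry that
 * tls_do_encryption() advanced past the record header, mark the record
 * ready, and kick the tx worker if the record is now at the head of
 * tx_list.
 */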
static void tls_encrypt_done(void *data, int err)
{
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct tls_rec *rec = data;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct sock *sk;

	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
		return;

	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec) {
			/* Schedule the transmission */
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
}

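/* Same one-biased pending-counter scheme as tls_decrypt_async_wait(),
 * for the transmit direction.
 */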
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->encrypt_pending);

	return ctx->async_wait.err;
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (rc == -EBUSY) {
		rc = tls_encrypt_async_wait(ctx);
		rc = rc ?: -EINPROGRESS;
	}
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context; encryption either succeeded
	 * or is still in progress.
	 */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

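/* Split the open record at @split_point, e.g. when BPF apply_bytes
 * covers only part of the plaintext: the first part stays in @from and
 * is pushed now, the remainder moves to the newly allocated record @to.
 * A page spanning the split point is shared between both records.
 */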
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

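/* Undo tls_split_open_record(): splice @from's plaintext back onto @to,
 * coalescing the boundary entries when they map contiguous ranges of
 * the same page, and take over @from's encrypted buffer.
 */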
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

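/* Close and encrypt the currently open record. For TLS 1.3 the content
 * type is chained to the end of the plaintext; the record may first be
 * split if apply_bytes or buffer limits require pushing only part of it.
 */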
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message. No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

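/* Run the sockmap BPF verdict program (if any) on the plaintext and act
 * on the result: __SK_PASS encrypts and sends as usual, __SK_REDIRECT
 * hands the plaintext to another socket, __SK_DROP frees it and fails
 * the send with -EACCES.
 */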
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;

		if ((s32)delta > 0) {
			/* It indicates that we executed bpf_msg_pop_data(),
			 * causing the plaintext data size to decrease.
			 * Therefore the encrypted data size also needs to
			 * correspondingly decrease. We only need to subtract
			 * delta to calculate the new ciphertext length since
			 * ktls does not support block encryption.
			 */
			struct sk_msg *enc = &ctx->open_rec->msg_encrypted;

			sk_msg_trim(sk, enc, enc->sg.size - delta);
		}
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			/* Regardless of whether the data represented by
			 * msg_redir is sent successfully, we have already
			 * uncharged it via sk_msg_return_zero(). The
			 * msg->sg.size represents the remaining unprocessed
			 * data, which needs to be uncharged here.
			 */
			sk_mem_uncharge(sk, msg->sg.size);
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

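/* MSG_SPLICE_PAGES path: extract page references straight out of the
 * iterator and attach them to the plaintext message, avoiding a copy.
 */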
static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
				 struct sk_msg *msg_pl, size_t try_to_copy,
				 ssize_t *copied)
{
	struct page *page = NULL, **pages = &page;

	do {
		ssize_t part;
		size_t off;

		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
		if (part <= 0)
			return part ?: -EIO;

		if (WARN_ON_ONCE(!sendpage_ok(page))) {
			iov_iter_revert(&msg->msg_iter, part);
			return -EIO;
		}

		sk_msg_page_add(msg_pl, page, part, off);
		msg_pl->sg.copybreak = 0;
		msg_pl->sg.curr = msg_pl->sg.end;
		sk_mem_charge(sk, part);
		*copied += part;
		try_to_copy -= part;
	} while (try_to_copy && !sk_msg_full(msg_pl));

	return 0;
}

static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
				 size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (!eor && (msg->msg_flags & MSG_EOR))
		return -EINVAL;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;

			if (sk_msg_full(msg_pl))
				full_record = true;

			if (full_record || eor)
				goto copied;
			continue;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC) {
					if (msg_pl->cork_bytes) {
						ret = 0;
						goto send_end;
					}
					goto rollback_iter;
				} else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
copied:
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc || eor) {
		int err;

		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
	return copied > 0 ? copied : ret;
}

int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
			       MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_sendmsg_locked(sk, msg, size);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;

	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* same checks as in tls_sw_push_pending_record() */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;
	if (msg_pl->sg.size == 0)
		goto unlock;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	if (tls_encrypt_async_wait(ctx))
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

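/* Wait for the strparser to have a complete record ready. Returns 1
 * with the record loaded, 0 if the psock queue has data or the socket
 * is done/shut down, -EAGAIN on timeout, or another negative error.
 */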
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	/* a rekey is pending, let userspace deal with it */
	if (unlikely(ctx->key_update_pending))
		return -EKEYEXPIRED;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}

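/* Pin user pages from @from and map @length bytes of them into the
 * scatterlist @to, for zero-copy decryption directly into user memory.
 */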
static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}

static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}

/* Decrypt handlers
 *
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 *    zc | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */

/* This function decrypts the input skb into either out_iov, out_sg, or
 * the skb's own buffers. The input parameter 'darg->zc' indicates whether
 * zero-copy mode should be tried. With zero-copy mode, either out_iov or
 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
 * decryption happens inside the skb buffers themselves, i.e. zero-copy
 * gets disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}
	dctx->free_sgout = !!pages;

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err) {
		if (darg->async_done)
			goto exit_free_skb;
		goto exit_free_pages;
	}

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (unlikely(darg->async_done))
		return 0;

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}

static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}

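/* Offload path: if the NIC already decrypted the record,
 * tls_device_decrypted() returns > 0 and no software decrypt is needed;
 * we only strip the TLS 1.3 padding and hand the payload over.
 */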
static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}

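/* Look inside handshake records for a KeyUpdate message; if one is
 * seen, flag the rx context so further reads fail with -EKEYEXPIRED
 * until userspace installs the new key.
 */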
static int tls_check_pending_rekey(struct sock *sk, struct tls_context *ctx,
				   struct sk_buff *skb)
{
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	char hs_type;
	int err;

	if (likely(tlm->control != TLS_RECORD_TYPE_HANDSHAKE))
		return 0;

	if (rxm->full_len < 1)
		return 0;

	err = skb_copy_bits(skb, rxm->offset, &hs_type, 1);
	if (err < 0) {
		DEBUG_NET_WARN_ON_ONCE(1);
		return err;
	}

	if (hs_type == TLS_HANDSHAKE_KEYUPDATE) {
		struct tls_sw_context_rx *rx_ctx = ctx->priv_ctx_rx;

		WRITE_ONCE(rx_ctx->key_update_pending, true);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYRECEIVED);
	}

	return 0;
}

static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return tls_check_pending_rekey(sk, tls_ctx, darg->skb);
}

int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}

static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}

static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}

/* This function traverses the rx_list in the tls receive context and
 * copies the decrypted records into the buffer provided by the caller
 * when zero copy is not in use. Records are removed from the rx_list
 * unless this is a peek case or the record has not been consumed
 * completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek,
			   bool *more)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto more;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record if it is a non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	return copied ? : err;
more:
	if (more)
		*more = true;
	goto out;
}

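/* During long reads, periodically let the TCP backlog drain so softirq
 * can keep the receive queue filled: flush once at least 128K has been
 * copied since the last flush, or when less than one full record is
 * left in the TCP queue.
 */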
static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;

	if (len_left <= decrypted)
		return false;

	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;

	*flushed_at = done;
	return sk_flush_backlog(sk);
}

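/* Only one reader may own the rx context at a time. Waiters park on
 * ctx->wq; reader_contended tells the release path to wake them up.
 */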
tls_rx_reader_acquire(struct sock * sk,struct tls_sw_context_rx * ctx,bool nonblock)1945 static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
1946 bool nonblock)
1947 {
1948 long timeo;
1949 int ret;
1950
1951 timeo = sock_rcvtimeo(sk, nonblock);
1952
1953 while (unlikely(ctx->reader_present)) {
1954 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1955
1956 ctx->reader_contended = 1;
1957
1958 add_wait_queue(&ctx->wq, &wait);
1959 ret = sk_wait_event(sk, &timeo,
1960 !READ_ONCE(ctx->reader_present), &wait);
1961 remove_wait_queue(&ctx->wq, &wait);
1962
1963 if (timeo <= 0)
1964 return -EAGAIN;
1965 if (signal_pending(current))
1966 return sock_intr_errno(timeo);
1967 if (ret < 0)
1968 return ret;
1969 }
1970
1971 WRITE_ONCE(ctx->reader_present, 1);
1972
1973 return 0;
1974 }
1975
1976 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
1977 bool nonblock)
1978 {
1979 int err;
1980
1981 lock_sock(sk);
1982 err = tls_rx_reader_acquire(sk, ctx, nonblock);
1983 if (err)
1984 release_sock(sk);
1985 return err;
1986 }
1987
1988 static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
1989 {
1990 if (unlikely(ctx->reader_contended)) {
1991 if (wq_has_sleeper(&ctx->wq))
1992 wake_up(&ctx->wq);
1993 else
1994 ctx->reader_contended = 0;
1995
1996 WARN_ON_ONCE(!ctx->reader_present);
1997 }
1998
1999 WRITE_ONCE(ctx->reader_present, 0);
2000 }
2001
2002 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
2003 {
2004 tls_rx_reader_release(sk, ctx);
2005 release_sock(sk);
2006 }
2007
2008 int tls_sw_recvmsg(struct sock *sk,
2009 struct msghdr *msg,
2010 size_t len,
2011 int flags,
2012 int *addr_len)
2013 {
2014 struct tls_context *tls_ctx = tls_get_ctx(sk);
2015 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2016 struct tls_prot_info *prot = &tls_ctx->prot_info;
2017 ssize_t decrypted = 0, async_copy_bytes = 0;
2018 struct sk_psock *psock;
2019 unsigned char control = 0;
2020 size_t flushed_at = 0;
2021 struct strp_msg *rxm;
2022 struct tls_msg *tlm;
2023 ssize_t copied = 0;
2024 ssize_t peeked = 0;
2025 bool async = false;
2026 int target, err;
2027 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
2028 bool is_peek = flags & MSG_PEEK;
2029 bool rx_more = false;
2030 bool released = true;
2031 bool bpf_strp_enabled;
2032 bool zc_capable;
2033
2034 if (unlikely(flags & MSG_ERRQUEUE))
2035 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
2036
2037 err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
2038 if (err < 0)
2039 return err;
2040 psock = sk_psock_get(sk);
2041 bpf_strp_enabled = sk_psock_strp_enabled(psock);
2042
2043 /* If crypto failed, the connection is broken */
2044 err = ctx->async_wait.err;
2045 if (err)
2046 goto end;
2047
2048 /* Process pending decrypted records. These must be copied, not zero-copy */
2049 err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
2050 if (err < 0)
2051 goto end;
2052
2053 copied = err;
2054 if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
2055 goto end;
2056
2057 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2058 len = len - copied;
2059
2060 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
2061 ctx->zc_capable;
2062 decrypted = 0;
2063 while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
2064 struct tls_decrypt_arg darg;
2065 int to_decrypt, chunk;
2066
2067 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
2068 released);
2069 if (err <= 0) {
2070 if (psock) {
2071 chunk = sk_msg_recvmsg(sk, psock, msg, len,
2072 flags);
2073 if (chunk > 0) {
2074 decrypted += chunk;
2075 len -= chunk;
2076 continue;
2077 }
2078 }
2079 goto recv_end;
2080 }
2081
2082 memset(&darg.inargs, 0, sizeof(darg.inargs));
2083
2084 rxm = strp_msg(tls_strp_msg(ctx));
2085 tlm = tls_msg(tls_strp_msg(ctx));
2086
2087 to_decrypt = rxm->full_len - prot->overhead_size;
2088
2089 if (zc_capable && to_decrypt <= len &&
2090 tlm->control == TLS_RECORD_TYPE_DATA)
2091 darg.zc = true;
2092
2093 /* Do not use async mode if record is non-data */
2094 if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
2095 darg.async = ctx->async_capable;
2096 else
2097 darg.async = false;
2098
2099 err = tls_rx_one_record(sk, msg, &darg);
2100 if (err < 0) {
2101 tls_err_abort(sk, -EBADMSG);
2102 goto recv_end;
2103 }
2104
2105 async |= darg.async;
2106
2107 /* If the type of records being processed is not known yet,
2108 * set it to the record type just dequeued. If it is already
2109 * known but does not match the record type just dequeued, go
2110 * to end. We always have the record type here, since for TLS 1.2
2111 * it is known as soon as the record is dequeued from the stream
2112 * parser, and for TLS 1.3 we disable async decryption.
2113 */
2114 err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2115 if (err <= 0) {
2116 DEBUG_NET_WARN_ON_ONCE(darg.zc);
2117 tls_rx_rec_done(ctx);
2118 put_on_rx_list_err:
2119 __skb_queue_tail(&ctx->rx_list, darg.skb);
2120 goto recv_end;
2121 }
2122
2123 /* periodically flush backlog, and feed strparser */
2124 released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
2125 decrypted + copied,
2126 &flushed_at);
2127
2128 /* TLS 1.3 may have updated the length by more than overhead */
2129 rxm = strp_msg(darg.skb);
2130 chunk = rxm->full_len;
2131 tls_rx_rec_done(ctx);
2132
2133 if (!darg.zc) {
2134 bool partially_consumed = chunk > len;
2135 struct sk_buff *skb = darg.skb;
2136
2137 DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2138
2139 if (async) {
2140 /* TLS 1.2-only, to_decrypt must be text len */
2141 chunk = min_t(int, to_decrypt, len);
2142 async_copy_bytes += chunk;
2143 put_on_rx_list:
2144 decrypted += chunk;
2145 len -= chunk;
2146 __skb_queue_tail(&ctx->rx_list, skb);
2147 if (unlikely(control != TLS_RECORD_TYPE_DATA))
2148 break;
2149 continue;
2150 }
2151
2152 if (bpf_strp_enabled) {
2153 released = true;
2154 err = sk_psock_tls_strp_read(psock, skb);
2155 if (err != __SK_PASS) {
2156 rxm->offset = rxm->offset + rxm->full_len;
2157 rxm->full_len = 0;
2158 if (err == __SK_DROP)
2159 consume_skb(skb);
2160 continue;
2161 }
2162 }
2163
2164 if (partially_consumed)
2165 chunk = len;
2166
2167 err = skb_copy_datagram_msg(skb, rxm->offset,
2168 msg, chunk);
2169 if (err < 0)
2170 goto put_on_rx_list_err;
2171
2172 if (is_peek) {
2173 peeked += chunk;
2174 goto put_on_rx_list;
2175 }
2176
2177 if (partially_consumed) {
2178 rxm->offset += chunk;
2179 rxm->full_len -= chunk;
2180 goto put_on_rx_list;
2181 }
2182
2183 consume_skb(skb);
2184 }
2185
2186 decrypted += chunk;
2187 len -= chunk;
2188
2189 /* Return full control message to userspace before trying
2190 * to parse another message type
2191 */
2192 msg->msg_flags |= MSG_EOR;
2193 if (control != TLS_RECORD_TYPE_DATA)
2194 break;
2195 }
2196
2197 recv_end:
2198 if (async) {
2199 int ret;
2200
2201 /* Wait for all previously submitted records to be decrypted */
2202 ret = tls_decrypt_async_wait(ctx);
2203 __skb_queue_purge(&ctx->async_hold);
2204
2205 if (ret) {
2206 if (err >= 0 || err == -EINPROGRESS)
2207 err = ret;
2208 goto end;
2209 }
2210
2211 /* Drain records from the rx_list & copy if required */
2212 if (is_peek)
2213 err = process_rx_list(ctx, msg, &control, copied + peeked,
2214 decrypted - peeked, is_peek, NULL);
2215 else
2216 err = process_rx_list(ctx, msg, &control, 0,
2217 async_copy_bytes, is_peek, NULL);
2218
2219 /* we could have copied less than we wanted, and possibly nothing */
2220 decrypted += max(err, 0) - async_copy_bytes;
2221 }
2222
2223 copied += decrypted;
2224
2225 end:
2226 tls_rx_reader_unlock(sk, ctx);
2227 if (psock)
2228 sk_psock_put(sk, psock);
2229 return copied ? : err;
2230 }
2231
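/* Userspace sketch (assumes "fd" is a kTLS RX socket and "pfd[1]" the
 * write end of a pipe; not part of this file):
 *
 *	ssize_t n = splice(fd, NULL, pfd[1], NULL, 16384, 0);
 *
 * Only TLS_RECORD_TYPE_DATA records can be spliced; hitting a control
 * record fails with -EINVAL and the record is requeued for recvmsg().
 */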
2232 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
2233 struct pipe_inode_info *pipe,
2234 size_t len, unsigned int flags)
2235 {
2236 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2237 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2238 struct strp_msg *rxm = NULL;
2239 struct sock *sk = sock->sk;
2240 struct tls_msg *tlm;
2241 struct sk_buff *skb;
2242 ssize_t copied = 0;
2243 int chunk;
2244 int err;
2245
2246 err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
2247 if (err < 0)
2248 return err;
2249
2250 if (!skb_queue_empty(&ctx->rx_list)) {
2251 skb = __skb_dequeue(&ctx->rx_list);
2252 } else {
2253 struct tls_decrypt_arg darg;
2254
2255 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
2256 true);
2257 if (err <= 0)
2258 goto splice_read_end;
2259
2260 memset(&darg.inargs, 0, sizeof(darg.inargs));
2261
2262 err = tls_rx_one_record(sk, NULL, &darg);
2263 if (err < 0) {
2264 tls_err_abort(sk, -EBADMSG);
2265 goto splice_read_end;
2266 }
2267
2268 tls_rx_rec_done(ctx);
2269 skb = darg.skb;
2270 }
2271
2272 rxm = strp_msg(skb);
2273 tlm = tls_msg(skb);
2274
2275 /* splice does not support reading control messages */
2276 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2277 err = -EINVAL;
2278 goto splice_requeue;
2279 }
2280
2281 chunk = min_t(unsigned int, rxm->full_len, len);
2282 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2283 if (copied < 0)
2284 goto splice_requeue;
2285
2286 if (chunk < rxm->full_len) {
2287 rxm->offset += len;
2288 rxm->full_len -= len;
2289 goto splice_requeue;
2290 }
2291
2292 consume_skb(skb);
2293
2294 splice_read_end:
2295 tls_rx_reader_unlock(sk, ctx);
2296 return copied ? : err;
2297
2298 splice_requeue:
2299 __skb_queue_head(&ctx->rx_list, skb);
2300 goto splice_read_end;
2301 }
2302
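/* In-kernel receive path: each decrypted record is handed to @read_actor,
 * which returns how many bytes it consumed. On partial consumption, or
 * when desc->count reaches zero, the remainder is requeued at the head
 * of rx_list for the next reader. Control records are not supported.
 */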
2303 int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
2304 sk_read_actor_t read_actor)
2305 {
2306 struct tls_context *tls_ctx = tls_get_ctx(sk);
2307 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2308 struct tls_prot_info *prot = &tls_ctx->prot_info;
2309 struct strp_msg *rxm = NULL;
2310 struct sk_buff *skb = NULL;
2311 struct sk_psock *psock;
2312 size_t flushed_at = 0;
2313 bool released = true;
2314 struct tls_msg *tlm;
2315 ssize_t copied = 0;
2316 ssize_t decrypted;
2317 int err, used;
2318
2319 psock = sk_psock_get(sk);
2320 if (psock) {
2321 sk_psock_put(sk, psock);
2322 return -EINVAL;
2323 }
2324 err = tls_rx_reader_acquire(sk, ctx, true);
2325 if (err < 0)
2326 return err;
2327
2328 /* If crypto failed, the connection is broken */
2329 err = ctx->async_wait.err;
2330 if (err)
2331 goto read_sock_end;
2332
2333 decrypted = 0;
2334 do {
2335 if (!skb_queue_empty(&ctx->rx_list)) {
2336 skb = __skb_dequeue(&ctx->rx_list);
2337 rxm = strp_msg(skb);
2338 tlm = tls_msg(skb);
2339 } else {
2340 struct tls_decrypt_arg darg;
2341
2342 err = tls_rx_rec_wait(sk, NULL, true, released);
2343 if (err <= 0)
2344 goto read_sock_end;
2345
2346 memset(&darg.inargs, 0, sizeof(darg.inargs));
2347
2348 err = tls_rx_one_record(sk, NULL, &darg);
2349 if (err < 0) {
2350 tls_err_abort(sk, -EBADMSG);
2351 goto read_sock_end;
2352 }
2353
2354 released = tls_read_flush_backlog(sk, prot, INT_MAX,
2355 0, decrypted,
2356 &flushed_at);
2357 skb = darg.skb;
2358 rxm = strp_msg(skb);
2359 tlm = tls_msg(skb);
2360 decrypted += rxm->full_len;
2361
2362 tls_rx_rec_done(ctx);
2363 }
2364
2365 /* read_sock does not support reading control messages */
2366 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2367 err = -EINVAL;
2368 goto read_sock_requeue;
2369 }
2370
2371 used = read_actor(desc, skb, rxm->offset, rxm->full_len);
2372 if (used <= 0) {
2373 if (!copied)
2374 err = used;
2375 goto read_sock_requeue;
2376 }
2377 copied += used;
2378 if (used < rxm->full_len) {
2379 rxm->offset += used;
2380 rxm->full_len -= used;
2381 if (!desc->count)
2382 goto read_sock_requeue;
2383 } else {
2384 consume_skb(skb);
2385 if (!desc->count)
2386 skb = NULL;
2387 }
2388 } while (skb);
2389
2390 read_sock_end:
2391 tls_rx_reader_release(sk, ctx);
2392 return copied ? : err;
2393
2394 read_sock_requeue:
2395 __skb_queue_head(&ctx->rx_list, skb);
2396 goto read_sock_end;
2397 }
2398
2399 bool tls_sw_sock_is_readable(struct sock *sk)
2400 {
2401 struct tls_context *tls_ctx = tls_get_ctx(sk);
2402 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2403 bool ingress_empty = true;
2404 struct sk_psock *psock;
2405
2406 rcu_read_lock();
2407 psock = sk_psock(sk);
2408 if (psock)
2409 ingress_empty = list_empty(&psock->ingress_msg);
2410 rcu_read_unlock();
2411
2412 return !ingress_empty || tls_strp_msg_ready(ctx) ||
2413 !skb_queue_empty(&ctx->rx_list);
2414 }
2415
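/* Layout of the TLS record header parsed below (RFC 8446, section 5.1):
 *
 *	byte  0    ContentType (stashed in strp->mark)
 *	bytes 1-2  legacy protocol version, 0x03 0x03 on the wire for
 *	           both TLS 1.2 and TLS 1.3
 *	bytes 3-4  payload length, big endian
 */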
2416 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
2417 {
2418 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2419 struct tls_prot_info *prot = &tls_ctx->prot_info;
2420 char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
2421 size_t cipher_overhead;
2422 size_t data_len = 0;
2423 int ret;
2424
2425 /* Verify that we have a full TLS header, or wait for more data */
2426 if (strp->stm.offset + prot->prepend_size > skb->len)
2427 return 0;
2428
2429 /* Sanity-check size of on-stack buffer. */
2430 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2431 ret = -EINVAL;
2432 goto read_failure;
2433 }
2434
2435 /* Linearize header to local buffer */
2436 ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
2437 if (ret < 0)
2438 goto read_failure;
2439
2440 strp->mark = header[0];
2441
2442 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2443
2444 cipher_overhead = prot->tag_size;
2445 if (prot->version != TLS_1_3_VERSION &&
2446 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2447 cipher_overhead += prot->iv_size;
2448
2449 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2450 prot->tail_size) {
2451 ret = -EMSGSIZE;
2452 goto read_failure;
2453 }
2454 if (data_len < cipher_overhead) {
2455 ret = -EBADMSG;
2456 goto read_failure;
2457 }
2458
2459 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2460 if (header[1] != TLS_1_2_VERSION_MINOR ||
2461 header[2] != TLS_1_2_VERSION_MAJOR) {
2462 ret = -EINVAL;
2463 goto read_failure;
2464 }
2465
2466 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2467 TCP_SKB_CB(skb)->seq + strp->stm.offset);
2468 return data_len + TLS_HEADER_SIZE;
2469
2470 read_failure:
2471 tls_err_abort(strp->sk, ret);
2472
2473 return ret;
2474 }
2475
2476 void tls_rx_msg_ready(struct tls_strparser *strp)
2477 {
2478 struct tls_sw_context_rx *ctx;
2479
2480 ctx = container_of(strp, struct tls_sw_context_rx, strp);
2481 ctx->saved_data_ready(strp->sk);
2482 }
2483
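/* Installed as the socket's ->sk_data_ready() callback by
 * tls_sw_strparser_arm() below; it may run from BH context, so switch
 * to GFP_ATOMIC while the strparser may need to allocate.
 */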
2484 static void tls_data_ready(struct sock *sk)
2485 {
2486 struct tls_context *tls_ctx = tls_get_ctx(sk);
2487 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2488 struct sk_psock *psock;
2489 gfp_t alloc_save;
2490
2491 trace_sk_data_ready(sk);
2492
2493 alloc_save = sk->sk_allocation;
2494 sk->sk_allocation = GFP_ATOMIC;
2495 tls_strp_data_ready(&ctx->strp);
2496 sk->sk_allocation = alloc_save;
2497
2498 psock = sk_psock_get(sk);
2499 if (psock) {
2500 if (!list_empty(&psock->ingress_msg))
2501 ctx->saved_data_ready(sk);
2502 sk_psock_put(sk, psock);
2503 }
2504 }
2505
2506 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2507 {
2508 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2509
2510 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2511 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2512 cancel_delayed_work_sync(&ctx->tx_work.work);
2513 }
2514
2515 void tls_sw_release_resources_tx(struct sock *sk)
2516 {
2517 struct tls_context *tls_ctx = tls_get_ctx(sk);
2518 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2519 struct tls_rec *rec, *tmp;
2520
2521 /* Wait for any pending async encryptions to complete */
2522 tls_encrypt_async_wait(ctx);
2523
2524 tls_tx_records(sk, -1);
2525
2526 /* Free up un-sent records in tx_list. First, free
2527 * the partially sent record, if any, at the head of tx_list.
2528 */
2529 if (tls_ctx->partially_sent_record) {
2530 tls_free_partial_record(sk, tls_ctx);
2531 rec = list_first_entry(&ctx->tx_list,
2532 struct tls_rec, list);
2533 list_del(&rec->list);
2534 sk_msg_free(sk, &rec->msg_plaintext);
2535 kfree(rec);
2536 }
2537
2538 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2539 list_del(&rec->list);
2540 sk_msg_free(sk, &rec->msg_encrypted);
2541 sk_msg_free(sk, &rec->msg_plaintext);
2542 kfree(rec);
2543 }
2544
2545 crypto_free_aead(ctx->aead_send);
2546 tls_free_open_rec(sk);
2547 }
2548
2549 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2550 {
2551 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2552
2553 kfree(ctx);
2554 }
2555
2556 void tls_sw_release_resources_rx(struct sock *sk)
2557 {
2558 struct tls_context *tls_ctx = tls_get_ctx(sk);
2559 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2560
2561 if (ctx->aead_recv) {
2562 __skb_queue_purge(&ctx->rx_list);
2563 crypto_free_aead(ctx->aead_recv);
2564 tls_strp_stop(&ctx->strp);
2565 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2566 * we still want to tls_strp_stop(), but sk->sk_data_ready was
2567 * never swapped.
2568 */
2569 if (ctx->saved_data_ready) {
2570 write_lock_bh(&sk->sk_callback_lock);
2571 sk->sk_data_ready = ctx->saved_data_ready;
2572 write_unlock_bh(&sk->sk_callback_lock);
2573 }
2574 }
2575 }
2576
2577 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2578 {
2579 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2580
2581 tls_strp_done(&ctx->strp);
2582 }
2583
2584 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2585 {
2586 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2587
2588 kfree(ctx);
2589 }
2590
2591 void tls_sw_free_resources_rx(struct sock *sk)
2592 {
2593 struct tls_context *tls_ctx = tls_get_ctx(sk);
2594
2595 tls_sw_release_resources_rx(sk);
2596 tls_sw_free_ctx_rx(tls_ctx);
2597 }
2598
2599 /* The work handler to transmit the encrypted records in tx_list */
2600 static void tx_work_handler(struct work_struct *work)
2601 {
2602 struct delayed_work *delayed_work = to_delayed_work(work);
2603 struct tx_work *tx_work = container_of(delayed_work,
2604 struct tx_work, work);
2605 struct sock *sk = tx_work->sk;
2606 struct tls_context *tls_ctx = tls_get_ctx(sk);
2607 struct tls_sw_context_tx *ctx;
2608
2609 if (unlikely(!tls_ctx))
2610 return;
2611
2612 ctx = tls_sw_ctx_tx(tls_ctx);
2613 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2614 return;
2615
2616 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2617 return;
2618
2619 if (mutex_trylock(&tls_ctx->tx_lock)) {
2620 lock_sock(sk);
2621 tls_tx_records(sk, -1);
2622 release_sock(sk);
2623 mutex_unlock(&tls_ctx->tx_lock);
2624 } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
2625 /* Someone is holding the tx_lock, they will likely run Tx
2626 * and cancel the work on their way out of the lock section.
2627 * Schedule a long delay just in case.
2628 */
2629 schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
2630 }
2631 }
2632
2633 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2634 {
2635 struct tls_rec *rec;
2636
2637 rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
2638 if (!rec)
2639 return false;
2640
2641 return READ_ONCE(rec->tx_ready);
2642 }
2643
2644 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2645 {
2646 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2647
2648 /* Schedule the transmission if the tx list is ready */
2649 if (tls_is_tx_ready(tx_ctx) &&
2650 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2651 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2652 }
2653
2654 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2655 {
2656 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2657
2658 write_lock_bh(&sk->sk_callback_lock);
2659 rx_ctx->saved_data_ready = sk->sk_data_ready;
2660 sk->sk_data_ready = tls_data_ready;
2661 write_unlock_bh(&sk->sk_callback_lock);
2662 }
2663
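/* With TLS 1.3 a record may carry padding, so the plaintext length is
 * only known after decryption and we cannot decrypt straight into user
 * memory unless the peer negotiated no padding (rx_no_pad). TLS 1.2
 * records are never padded, hence always zero-copy capable.
 */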
2664 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2665 {
2666 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2667
2668 rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2669 tls_ctx->prot_info.version != TLS_1_3_VERSION;
2670 }
2671
2672 static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
2673 {
2674 struct tls_sw_context_tx *sw_ctx_tx;
2675
2676 if (!ctx->priv_ctx_tx) {
2677 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2678 if (!sw_ctx_tx)
2679 return NULL;
2680 } else {
2681 sw_ctx_tx = ctx->priv_ctx_tx;
2682 }
2683
2684 crypto_init_wait(&sw_ctx_tx->async_wait);
2685 atomic_set(&sw_ctx_tx->encrypt_pending, 1);
2686 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2687 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2688 sw_ctx_tx->tx_work.sk = sk;
2689
2690 return sw_ctx_tx;
2691 }
2692
2693 static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
2694 {
2695 struct tls_sw_context_rx *sw_ctx_rx;
2696
2697 if (!ctx->priv_ctx_rx) {
2698 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2699 if (!sw_ctx_rx)
2700 return NULL;
2701 } else {
2702 sw_ctx_rx = ctx->priv_ctx_rx;
2703 }
2704
2705 crypto_init_wait(&sw_ctx_rx->async_wait);
2706 atomic_set(&sw_ctx_rx->decrypt_pending, 1);
2707 init_waitqueue_head(&sw_ctx_rx->wq);
2708 skb_queue_head_init(&sw_ctx_rx->rx_list);
2709 skb_queue_head_init(&sw_ctx_rx->async_hold);
2710
2711 return sw_ctx_rx;
2712 }
2713
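/* Worked example for TLS_CIPHER_AES_GCM_128 (nonce 8, tag 16 bytes):
 *
 *	TLS 1.2: prepend = 5 (header) + 8 (explicit nonce) = 13,
 *	         overhead = 13 + 16 (tag) + 0 (tail) = 29 bytes per record
 *	TLS 1.3: nonce is implicit, prepend = 5 (header only),
 *	         overhead = 5 + 16 + 1 (content-type tail byte) = 22 bytes
 */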
2714 int init_prot_info(struct tls_prot_info *prot,
2715 const struct tls_crypto_info *crypto_info,
2716 const struct tls_cipher_desc *cipher_desc)
2717 {
2718 u16 nonce_size = cipher_desc->nonce;
2719
2720 if (crypto_info->version == TLS_1_3_VERSION) {
2721 nonce_size = 0;
2722 prot->aad_size = TLS_HEADER_SIZE;
2723 prot->tail_size = 1;
2724 } else {
2725 prot->aad_size = TLS_AAD_SPACE_SIZE;
2726 prot->tail_size = 0;
2727 }
2728
2729 /* Sanity-check the sizes for stack allocations. */
2730 if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
2731 return -EINVAL;
2732
2733 prot->version = crypto_info->version;
2734 prot->cipher_type = crypto_info->cipher_type;
2735 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2736 prot->tag_size = cipher_desc->tag;
2737 prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
2738 prot->iv_size = cipher_desc->iv;
2739 prot->salt_size = cipher_desc->salt;
2740 prot->rec_seq_size = cipher_desc->rec_seq;
2741
2742 return 0;
2743 }
2744
2745 static void tls_finish_key_update(struct sock *sk, struct tls_context *tls_ctx)
2746 {
2747 struct tls_sw_context_rx *ctx = tls_ctx->priv_ctx_rx;
2748
2749 WRITE_ONCE(ctx->key_update_pending, false);
2750 /* wake up pre-existing poll() */
2751 ctx->saved_data_ready(sk);
2752 }
2753
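/* Userspace entry sketch (assumes "fd" is a connected TCP socket and the
 * key material comes from a completed handshake; not part of this file).
 * tls_set_sw_offload() runs as a consequence of:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	(fill ci.key, ci.iv, ci.salt, ci.rec_seq from the handshake)
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));	(tx == 1 below)
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));	(tx == 0 below)
 */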
2754 int tls_set_sw_offload(struct sock *sk, int tx,
2755 struct tls_crypto_info *new_crypto_info)
2756 {
2757 struct tls_crypto_info *crypto_info, *src_crypto_info;
2758 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2759 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2760 const struct tls_cipher_desc *cipher_desc;
2761 char *iv, *rec_seq, *key, *salt;
2762 struct cipher_context *cctx;
2763 struct tls_prot_info *prot;
2764 struct crypto_aead **aead;
2765 struct tls_context *ctx;
2766 struct crypto_tfm *tfm;
2767 int rc = 0;
2768
2769 ctx = tls_get_ctx(sk);
2770 prot = &ctx->prot_info;
2771
2772 /* new_crypto_info != NULL means rekey */
2773 if (!new_crypto_info) {
2774 if (tx) {
2775 ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
2776 if (!ctx->priv_ctx_tx)
2777 return -ENOMEM;
2778 } else {
2779 ctx->priv_ctx_rx = init_ctx_rx(ctx);
2780 if (!ctx->priv_ctx_rx)
2781 return -ENOMEM;
2782 }
2783 }
2784
2785 if (tx) {
2786 sw_ctx_tx = ctx->priv_ctx_tx;
2787 crypto_info = &ctx->crypto_send.info;
2788 cctx = &ctx->tx;
2789 aead = &sw_ctx_tx->aead_send;
2790 } else {
2791 sw_ctx_rx = ctx->priv_ctx_rx;
2792 crypto_info = &ctx->crypto_recv.info;
2793 cctx = &ctx->rx;
2794 aead = &sw_ctx_rx->aead_recv;
2795 }
2796
2797 src_crypto_info = new_crypto_info ?: crypto_info;
2798
2799 cipher_desc = get_cipher_desc(src_crypto_info->cipher_type);
2800 if (!cipher_desc) {
2801 rc = -EINVAL;
2802 goto free_priv;
2803 }
2804
2805 rc = init_prot_info(prot, src_crypto_info, cipher_desc);
2806 if (rc)
2807 goto free_priv;
2808
2809 iv = crypto_info_iv(src_crypto_info, cipher_desc);
2810 key = crypto_info_key(src_crypto_info, cipher_desc);
2811 salt = crypto_info_salt(src_crypto_info, cipher_desc);
2812 rec_seq = crypto_info_rec_seq(src_crypto_info, cipher_desc);
2813
2814 if (!*aead) {
2815 *aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
2816 if (IS_ERR(*aead)) {
2817 rc = PTR_ERR(*aead);
2818 *aead = NULL;
2819 goto free_priv;
2820 }
2821 }
2822
2823 ctx->push_pending_record = tls_sw_push_pending_record;
2824
2825 /* setkey is the last operation that could fail during a
2826 * rekey. If it succeeds, we can start modifying the
2827 * context.
2828 */
2829 rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
2830 if (rc) {
2831 if (new_crypto_info)
2832 goto out;
2833 else
2834 goto free_aead;
2835 }
2836
2837 if (!new_crypto_info) {
2838 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2839 if (rc)
2840 goto free_aead;
2841 }
2842
2843 if (!tx && !new_crypto_info) {
2844 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2845
2846 tls_update_rx_zc_capable(ctx);
2847 sw_ctx_rx->async_capable =
2848 src_crypto_info->version != TLS_1_3_VERSION &&
2849 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2850
2851 rc = tls_strp_init(&sw_ctx_rx->strp, sk);
2852 if (rc)
2853 goto free_aead;
2854 }
2855
2856 memcpy(cctx->iv, salt, cipher_desc->salt);
2857 memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
2858 memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);
2859
2860 if (new_crypto_info) {
2861 unsafe_memcpy(crypto_info, new_crypto_info,
2862 cipher_desc->crypto_info,
2863 /* size was checked in do_tls_setsockopt_conf */);
2864 memzero_explicit(new_crypto_info, cipher_desc->crypto_info);
2865 if (!tx)
2866 tls_finish_key_update(sk, ctx);
2867 }
2868
2869 goto out;
2870
2871 free_aead:
2872 crypto_free_aead(*aead);
2873 *aead = NULL;
2874 free_priv:
2875 if (!new_crypto_info) {
2876 if (tx) {
2877 kfree(ctx->priv_ctx_tx);
2878 ctx->priv_ctx_tx = NULL;
2879 } else {
2880 kfree(ctx->priv_ctx_rx);
2881 ctx->priv_ctx_rx = NULL;
2882 }
2883 }
2884 out:
2885 return rc;
2886 }
2887