1 /*
2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
8 *
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
14 *
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
18 *
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
22 *
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 */
37
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/splice.h>
43 #include <crypto/aead.h>
44
45 #include <net/strparser.h>
46 #include <net/tls.h>
47 #include <trace/events/sock.h>
48
49 #include "tls.h"
50
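/* Per-record decryption arguments passed from the receive path into the
 * decrypt handlers below.
 */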
51 struct tls_decrypt_arg {
52 struct_group(inargs,
53 bool zc;
54 bool async;
55 bool async_done;
56 u8 tail;
57 );
58
59 struct sk_buff *skb;
60 };
61
62 struct tls_decrypt_ctx {
63 struct sock *sk;
64 u8 iv[TLS_MAX_IV_SIZE];
65 u8 aad[TLS_MAX_AAD_SIZE];
66 u8 tail;
67 bool free_sgout;
68 struct scatterlist sg[];
69 };
70
71 noinline void tls_err_abort(struct sock *sk, int err)
72 {
73 WARN_ON_ONCE(err >= 0);
74 /* sk->sk_err should contain a positive error code. */
75 WRITE_ONCE(sk->sk_err, -err);
76 /* Paired with smp_rmb() in tcp_poll() */
77 smp_wmb();
78 sk_error_report(sk);
79 }
80
81 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
82 unsigned int recursion_level)
83 {
84 int start = skb_headlen(skb);
85 int i, chunk = start - offset;
86 struct sk_buff *frag_iter;
87 int elt = 0;
88
89 if (unlikely(recursion_level >= 24))
90 return -EMSGSIZE;
91
92 if (chunk > 0) {
93 if (chunk > len)
94 chunk = len;
95 elt++;
96 len -= chunk;
97 if (len == 0)
98 return elt;
99 offset += chunk;
100 }
101
102 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
103 int end;
104
105 WARN_ON(start > offset + len);
106
107 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
108 chunk = end - offset;
109 if (chunk > 0) {
110 if (chunk > len)
111 chunk = len;
112 elt++;
113 len -= chunk;
114 if (len == 0)
115 return elt;
116 offset += chunk;
117 }
118 start = end;
119 }
120
121 if (unlikely(skb_has_frag_list(skb))) {
122 skb_walk_frags(skb, frag_iter) {
123 int end, ret;
124
125 WARN_ON(start > offset + len);
126
127 end = start + frag_iter->len;
128 chunk = end - offset;
129 if (chunk > 0) {
130 if (chunk > len)
131 chunk = len;
132 ret = __skb_nsg(frag_iter, offset - start, chunk,
133 recursion_level + 1);
134 if (unlikely(ret < 0))
135 return ret;
136 elt += ret;
137 len -= chunk;
138 if (len == 0)
139 return elt;
140 offset += chunk;
141 }
142 start = end;
143 }
144 }
145 BUG_ON(len);
146 return elt;
147 }
148
149 /* Return the number of scatterlist elements required to completely map the
150 * skb, or -EMSGSIZE if the recursion depth is exceeded.
151 */
152 static int skb_nsg(struct sk_buff *skb, int offset, int len)
153 {
154 return __skb_nsg(skb, offset, len, 0);
155 }
156
157 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
158 struct tls_decrypt_arg *darg)
159 {
160 struct strp_msg *rxm = strp_msg(skb);
161 struct tls_msg *tlm = tls_msg(skb);
162 int sub = 0;
163
164 /* Determine zero-padding length */
165 if (prot->version == TLS_1_3_VERSION) {
166 int offset = rxm->full_len - TLS_TAG_SIZE - 1;
167 char content_type = darg->zc ? darg->tail : 0;
168 int err;
169
170 while (content_type == 0) {
171 if (offset < prot->prepend_size)
172 return -EBADMSG;
173 err = skb_copy_bits(skb, rxm->offset + offset,
174 &content_type, 1);
175 if (err)
176 return err;
177 if (content_type)
178 break;
179 sub++;
180 offset--;
181 }
182 tlm->control = content_type;
183 }
184 return sub;
185 }
186
187 static void tls_decrypt_done(void *data, int err)
188 {
189 struct aead_request *aead_req = data;
190 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
191 struct scatterlist *sgout = aead_req->dst;
192 struct tls_sw_context_rx *ctx;
193 struct tls_decrypt_ctx *dctx;
194 struct tls_context *tls_ctx;
195 struct scatterlist *sg;
196 unsigned int pages;
197 struct sock *sk;
198 int aead_size;
199
200 /* If requests get too backlogged, the crypto API returns -EBUSY and calls
201 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
202 * to make waiting for the backlog to flush with crypto_wait_req() easier.
203 * The first wait converts -EBUSY -> -EINPROGRESS, and the second one
204 * -EINPROGRESS -> 0.
205 * We have a single struct crypto_async_request per direction, so this
206 * scheme doesn't help us; just ignore the first ->complete().
207 */
208 if (err == -EINPROGRESS)
209 return;
210
211 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
212 aead_size = ALIGN(aead_size, __alignof__(*dctx));
213 dctx = (void *)((u8 *)aead_req + aead_size);
214
215 sk = dctx->sk;
216 tls_ctx = tls_get_ctx(sk);
217 ctx = tls_sw_ctx_rx(tls_ctx);
218
219 /* Propagate if there was an err */
220 if (err) {
221 if (err == -EBADMSG)
222 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
223 ctx->async_wait.err = err;
224 tls_err_abort(sk, err);
225 }
226
227 /* Free the destination pages if the skb was not decrypted in place */
228 if (dctx->free_sgout) {
229 /* Skip the first S/G entry as it points to AAD */
230 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
231 if (!sg)
232 break;
233 put_page(sg_page(sg));
234 }
235 }
236
237 kfree(aead_req);
238
239 if (atomic_dec_and_test(&ctx->decrypt_pending))
240 complete(&ctx->async_wait.completion);
241 }
242
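/* Wait for all outstanding async decrypt requests to complete, re-arm the
 * pending counter for the next batch and drop any skbs parked while their
 * decryption was in flight.
 */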
243 static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
244 {
245 if (!atomic_dec_and_test(&ctx->decrypt_pending))
246 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
247 atomic_inc(&ctx->decrypt_pending);
248
249 __skb_queue_purge(&ctx->async_hold);
250 return ctx->async_wait.err;
251 }
252
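/* Submit a single AEAD decrypt request. In the async case completion is
 * reported through tls_decrypt_done(); otherwise the call waits for the
 * crypto layer to finish.
 */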
253 static int tls_do_decryption(struct sock *sk,
254 struct scatterlist *sgin,
255 struct scatterlist *sgout,
256 char *iv_recv,
257 size_t data_len,
258 struct aead_request *aead_req,
259 struct tls_decrypt_arg *darg)
260 {
261 struct tls_context *tls_ctx = tls_get_ctx(sk);
262 struct tls_prot_info *prot = &tls_ctx->prot_info;
263 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
264 int ret;
265
266 aead_request_set_tfm(aead_req, ctx->aead_recv);
267 aead_request_set_ad(aead_req, prot->aad_size);
268 aead_request_set_crypt(aead_req, sgin, sgout,
269 data_len + prot->tag_size,
270 (u8 *)iv_recv);
271
272 if (darg->async) {
273 aead_request_set_callback(aead_req,
274 CRYPTO_TFM_REQ_MAY_BACKLOG,
275 tls_decrypt_done, aead_req);
276 DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
277 atomic_inc(&ctx->decrypt_pending);
278 } else {
279 DECLARE_CRYPTO_WAIT(wait);
280
281 aead_request_set_callback(aead_req,
282 CRYPTO_TFM_REQ_MAY_BACKLOG,
283 crypto_req_done, &wait);
284 ret = crypto_aead_decrypt(aead_req);
285 if (ret == -EINPROGRESS || ret == -EBUSY)
286 ret = crypto_wait_req(ret, &wait);
287 return ret;
288 }
289
290 ret = crypto_aead_decrypt(aead_req);
291 if (ret == -EINPROGRESS)
292 return 0;
293
294 if (ret == -EBUSY) {
295 ret = tls_decrypt_async_wait(ctx);
296 darg->async_done = true;
297 /* all completions have run, we're not doing async anymore */
298 darg->async = false;
299 return ret;
300 }
301
302 atomic_dec(&ctx->decrypt_pending);
303 darg->async = false;
304
305 return ret;
306 }
307
308 static void tls_trim_both_msgs(struct sock *sk, int target_size)
309 {
310 struct tls_context *tls_ctx = tls_get_ctx(sk);
311 struct tls_prot_info *prot = &tls_ctx->prot_info;
312 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
313 struct tls_rec *rec = ctx->open_rec;
314
315 sk_msg_trim(sk, &rec->msg_plaintext, target_size);
316 if (target_size > 0)
317 target_size += prot->overhead_size;
318 sk_msg_trim(sk, &rec->msg_encrypted, target_size);
319 }
320
321 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
322 {
323 struct tls_context *tls_ctx = tls_get_ctx(sk);
324 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
325 struct tls_rec *rec = ctx->open_rec;
326 struct sk_msg *msg_en = &rec->msg_encrypted;
327
328 return sk_msg_alloc(sk, msg_en, len, 0);
329 }
330
331 static int tls_clone_plaintext_msg(struct sock *sk, int required)
332 {
333 struct tls_context *tls_ctx = tls_get_ctx(sk);
334 struct tls_prot_info *prot = &tls_ctx->prot_info;
335 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
336 struct tls_rec *rec = ctx->open_rec;
337 struct sk_msg *msg_pl = &rec->msg_plaintext;
338 struct sk_msg *msg_en = &rec->msg_encrypted;
339 int skip, len;
340
341 /* We add page references worth len bytes from the encrypted sg
342 * at the end of the plaintext sg. It is guaranteed that msg_en
343 * has enough room (ensured by the caller).
344 */
345 len = required - msg_pl->sg.size;
346
347 /* Skip initial bytes in msg_en's data to be able to use
348 * same offset of both plain and encrypted data.
349 */
350 skip = prot->prepend_size + msg_pl->sg.size;
351
352 return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
353 }
354
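/* Allocate a TX record together with its AEAD request and set up the AAD
 * entries of the plaintext and ciphertext scatterlists.
 */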
355 static struct tls_rec *tls_get_rec(struct sock *sk)
356 {
357 struct tls_context *tls_ctx = tls_get_ctx(sk);
358 struct tls_prot_info *prot = &tls_ctx->prot_info;
359 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
360 struct sk_msg *msg_pl, *msg_en;
361 struct tls_rec *rec;
362 int mem_size;
363
364 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
365
366 rec = kzalloc(mem_size, sk->sk_allocation);
367 if (!rec)
368 return NULL;
369
370 msg_pl = &rec->msg_plaintext;
371 msg_en = &rec->msg_encrypted;
372
373 sk_msg_init(msg_pl);
374 sk_msg_init(msg_en);
375
376 sg_init_table(rec->sg_aead_in, 2);
377 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
378 sg_unmark_end(&rec->sg_aead_in[1]);
379
380 sg_init_table(rec->sg_aead_out, 2);
381 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
382 sg_unmark_end(&rec->sg_aead_out[1]);
383
384 rec->sk = sk;
385
386 return rec;
387 }
388
389 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
390 {
391 sk_msg_free(sk, &rec->msg_encrypted);
392 sk_msg_free(sk, &rec->msg_plaintext);
393 kfree(rec);
394 }
395
396 static void tls_free_open_rec(struct sock *sk)
397 {
398 struct tls_context *tls_ctx = tls_get_ctx(sk);
399 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
400 struct tls_rec *rec = ctx->open_rec;
401
402 if (rec) {
403 tls_free_rec(sk, rec);
404 ctx->open_rec = NULL;
405 }
406 }
407
408 int tls_tx_records(struct sock *sk, int flags)
409 {
410 struct tls_context *tls_ctx = tls_get_ctx(sk);
411 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
412 struct tls_rec *rec, *tmp;
413 struct sk_msg *msg_en;
414 int tx_flags, rc = 0;
415
416 if (tls_is_partially_sent_record(tls_ctx)) {
417 rec = list_first_entry(&ctx->tx_list,
418 struct tls_rec, list);
419
420 if (flags == -1)
421 tx_flags = rec->tx_flags;
422 else
423 tx_flags = flags;
424
425 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
426 if (rc)
427 goto tx_err;
428
429 /* Full record has been transmitted.
430 * Remove the head of tx_list
431 */
432 list_del(&rec->list);
433 sk_msg_free(sk, &rec->msg_plaintext);
434 kfree(rec);
435 }
436
437 /* Tx all ready records */
438 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
439 if (READ_ONCE(rec->tx_ready)) {
440 if (flags == -1)
441 tx_flags = rec->tx_flags;
442 else
443 tx_flags = flags;
444
445 msg_en = &rec->msg_encrypted;
446 rc = tls_push_sg(sk, tls_ctx,
447 &msg_en->sg.data[msg_en->sg.curr],
448 0, tx_flags);
449 if (rc)
450 goto tx_err;
451
452 list_del(&rec->list);
453 sk_msg_free(sk, &rec->msg_plaintext);
454 kfree(rec);
455 } else {
456 break;
457 }
458 }
459
460 tx_err:
461 if (rc < 0 && rc != -EAGAIN)
462 tls_err_abort(sk, rc);
463
464 return rc;
465 }
466
467 static void tls_encrypt_done(void *data, int err)
468 {
469 struct tls_sw_context_tx *ctx;
470 struct tls_context *tls_ctx;
471 struct tls_prot_info *prot;
472 struct tls_rec *rec = data;
473 struct scatterlist *sge;
474 struct sk_msg *msg_en;
475 struct sock *sk;
476
477 if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
478 return;
479
480 msg_en = &rec->msg_encrypted;
481
482 sk = rec->sk;
483 tls_ctx = tls_get_ctx(sk);
484 prot = &tls_ctx->prot_info;
485 ctx = tls_sw_ctx_tx(tls_ctx);
486
487 sge = sk_msg_elem(msg_en, msg_en->sg.curr);
488 sge->offset -= prot->prepend_size;
489 sge->length += prot->prepend_size;
490
491 /* Check if an error was previously set on the socket */
492 if (err || sk->sk_err) {
493 rec = NULL;
494
495 /* If err is already set on socket, return the same code */
496 if (sk->sk_err) {
497 ctx->async_wait.err = -sk->sk_err;
498 } else {
499 ctx->async_wait.err = err;
500 tls_err_abort(sk, err);
501 }
502 }
503
504 if (rec) {
505 struct tls_rec *first_rec;
506
507 /* Mark the record as ready for transmission */
508 smp_store_mb(rec->tx_ready, true);
509
510 /* If received record is at head of tx_list, schedule tx */
511 first_rec = list_first_entry(&ctx->tx_list,
512 struct tls_rec, list);
513 if (rec == first_rec) {
514 /* Schedule the transmission */
515 if (!test_and_set_bit(BIT_TX_SCHEDULED,
516 &ctx->tx_bitmask))
517 schedule_delayed_work(&ctx->tx_work.work, 1);
518 }
519 }
520
521 if (atomic_dec_and_test(&ctx->encrypt_pending))
522 complete(&ctx->async_wait.completion);
523 }
524
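/* Wait for all outstanding async encrypt requests to complete and return the
 * error, if any, recorded by their completions.
 */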
525 static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
526 {
527 if (!atomic_dec_and_test(&ctx->encrypt_pending))
528 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
529 atomic_inc(&ctx->encrypt_pending);
530
531 return ctx->async_wait.err;
532 }
533
534 static int tls_do_encryption(struct sock *sk,
535 struct tls_context *tls_ctx,
536 struct tls_sw_context_tx *ctx,
537 struct aead_request *aead_req,
538 size_t data_len, u32 start)
539 {
540 struct tls_prot_info *prot = &tls_ctx->prot_info;
541 struct tls_rec *rec = ctx->open_rec;
542 struct sk_msg *msg_en = &rec->msg_encrypted;
543 struct scatterlist *sge = sk_msg_elem(msg_en, start);
544 int rc, iv_offset = 0;
545
546 /* For CCM based ciphers, first byte of IV is a constant */
547 switch (prot->cipher_type) {
548 case TLS_CIPHER_AES_CCM_128:
549 rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
550 iv_offset = 1;
551 break;
552 case TLS_CIPHER_SM4_CCM:
553 rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
554 iv_offset = 1;
555 break;
556 }
557
558 memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
559 prot->iv_size + prot->salt_size);
560
561 tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
562 tls_ctx->tx.rec_seq);
563
564 sge->offset += prot->prepend_size;
565 sge->length -= prot->prepend_size;
566
567 msg_en->sg.curr = start;
568
569 aead_request_set_tfm(aead_req, ctx->aead_send);
570 aead_request_set_ad(aead_req, prot->aad_size);
571 aead_request_set_crypt(aead_req, rec->sg_aead_in,
572 rec->sg_aead_out,
573 data_len, rec->iv_data);
574
575 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
576 tls_encrypt_done, rec);
577
578 /* Add the record in tx_list */
579 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
580 DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
581 atomic_inc(&ctx->encrypt_pending);
582
583 rc = crypto_aead_encrypt(aead_req);
584 if (rc == -EBUSY) {
585 rc = tls_encrypt_async_wait(ctx);
586 rc = rc ?: -EINPROGRESS;
587 }
588 if (!rc || rc != -EINPROGRESS) {
589 atomic_dec(&ctx->encrypt_pending);
590 sge->offset -= prot->prepend_size;
591 sge->length += prot->prepend_size;
592 }
593
594 if (!rc) {
595 WRITE_ONCE(rec->tx_ready, true);
596 } else if (rc != -EINPROGRESS) {
597 list_del(&rec->list);
598 return rc;
599 }
600
601 /* Unhook the record from the context unless encryption failed */
602 ctx->open_rec = NULL;
603 tls_advance_record_sn(sk, prot, &tls_ctx->tx);
604 return rc;
605 }
606
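/* Split the open record at the apply_bytes boundary: 'from' keeps the bytes
 * covered by apply_bytes, a freshly allocated record receives the rest.
 */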
607 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
608 struct tls_rec **to, struct sk_msg *msg_opl,
609 struct sk_msg *msg_oen, u32 split_point,
610 u32 tx_overhead_size, u32 *orig_end)
611 {
612 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
613 struct scatterlist *sge, *osge, *nsge;
614 u32 orig_size = msg_opl->sg.size;
615 struct scatterlist tmp = { };
616 struct sk_msg *msg_npl;
617 struct tls_rec *new;
618 int ret;
619
620 new = tls_get_rec(sk);
621 if (!new)
622 return -ENOMEM;
623 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
624 tx_overhead_size, 0);
625 if (ret < 0) {
626 tls_free_rec(sk, new);
627 return ret;
628 }
629
630 *orig_end = msg_opl->sg.end;
631 i = msg_opl->sg.start;
632 sge = sk_msg_elem(msg_opl, i);
633 while (apply && sge->length) {
634 if (sge->length > apply) {
635 u32 len = sge->length - apply;
636
637 get_page(sg_page(sge));
638 sg_set_page(&tmp, sg_page(sge), len,
639 sge->offset + apply);
640 sge->length = apply;
641 bytes += apply;
642 apply = 0;
643 } else {
644 apply -= sge->length;
645 bytes += sge->length;
646 }
647
648 sk_msg_iter_var_next(i);
649 if (i == msg_opl->sg.end)
650 break;
651 sge = sk_msg_elem(msg_opl, i);
652 }
653
654 msg_opl->sg.end = i;
655 msg_opl->sg.curr = i;
656 msg_opl->sg.copybreak = 0;
657 msg_opl->apply_bytes = 0;
658 msg_opl->sg.size = bytes;
659
660 msg_npl = &new->msg_plaintext;
661 msg_npl->apply_bytes = apply;
662 msg_npl->sg.size = orig_size - bytes;
663
664 j = msg_npl->sg.start;
665 nsge = sk_msg_elem(msg_npl, j);
666 if (tmp.length) {
667 memcpy(nsge, &tmp, sizeof(*nsge));
668 sk_msg_iter_var_next(j);
669 nsge = sk_msg_elem(msg_npl, j);
670 }
671
672 osge = sk_msg_elem(msg_opl, i);
673 while (osge->length) {
674 memcpy(nsge, osge, sizeof(*nsge));
675 sg_unmark_end(nsge);
676 sk_msg_iter_var_next(i);
677 sk_msg_iter_var_next(j);
678 if (i == *orig_end)
679 break;
680 osge = sk_msg_elem(msg_opl, i);
681 nsge = sk_msg_elem(msg_npl, j);
682 }
683
684 msg_npl->sg.end = j;
685 msg_npl->sg.curr = j;
686 msg_npl->sg.copybreak = 0;
687
688 *to = new;
689 return 0;
690 }
691
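/* Undo a previous tls_split_open_record(): fold the split-off record 'from'
 * back into 'to' and free it.
 */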
692 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
693 struct tls_rec *from, u32 orig_end)
694 {
695 struct sk_msg *msg_npl = &from->msg_plaintext;
696 struct sk_msg *msg_opl = &to->msg_plaintext;
697 struct scatterlist *osge, *nsge;
698 u32 i, j;
699
700 i = msg_opl->sg.end;
701 sk_msg_iter_var_prev(i);
702 j = msg_npl->sg.start;
703
704 osge = sk_msg_elem(msg_opl, i);
705 nsge = sk_msg_elem(msg_npl, j);
706
707 if (sg_page(osge) == sg_page(nsge) &&
708 osge->offset + osge->length == nsge->offset) {
709 osge->length += nsge->length;
710 put_page(sg_page(nsge));
711 }
712
713 msg_opl->sg.end = orig_end;
714 msg_opl->sg.curr = orig_end;
715 msg_opl->sg.copybreak = 0;
716 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
717 msg_opl->sg.size += msg_npl->sg.size;
718
719 sk_msg_free(sk, &to->msg_encrypted);
720 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
721
722 kfree(from);
723 }
724
725 static int tls_push_record(struct sock *sk, int flags,
726 unsigned char record_type)
727 {
728 struct tls_context *tls_ctx = tls_get_ctx(sk);
729 struct tls_prot_info *prot = &tls_ctx->prot_info;
730 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
731 struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
732 u32 i, split_point, orig_end;
733 struct sk_msg *msg_pl, *msg_en;
734 struct aead_request *req;
735 bool split;
736 int rc;
737
738 if (!rec)
739 return 0;
740
741 msg_pl = &rec->msg_plaintext;
742 msg_en = &rec->msg_encrypted;
743
744 split_point = msg_pl->apply_bytes;
745 split = split_point && split_point < msg_pl->sg.size;
746 if (unlikely((!split &&
747 msg_pl->sg.size +
748 prot->overhead_size > msg_en->sg.size) ||
749 (split &&
750 split_point +
751 prot->overhead_size > msg_en->sg.size))) {
752 split = true;
753 split_point = msg_en->sg.size;
754 }
755 if (split) {
756 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
757 split_point, prot->overhead_size,
758 &orig_end);
759 if (rc < 0)
760 return rc;
761 /* This can happen if above tls_split_open_record allocates
762 * a single large encryption buffer instead of two smaller
763 * ones. In this case adjust pointers and continue without
764 * split.
765 */
766 if (!msg_pl->sg.size) {
767 tls_merge_open_record(sk, rec, tmp, orig_end);
768 msg_pl = &rec->msg_plaintext;
769 msg_en = &rec->msg_encrypted;
770 split = false;
771 }
772 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
773 prot->overhead_size);
774 }
775
776 rec->tx_flags = flags;
777 req = &rec->aead_req;
778
779 i = msg_pl->sg.end;
780 sk_msg_iter_var_prev(i);
781
782 rec->content_type = record_type;
783 if (prot->version == TLS_1_3_VERSION) {
784 /* Add content type to end of message. No padding added */
785 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
786 sg_mark_end(&rec->sg_content_type);
787 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
788 &rec->sg_content_type);
789 } else {
790 sg_mark_end(sk_msg_elem(msg_pl, i));
791 }
792
793 if (msg_pl->sg.end < msg_pl->sg.start) {
794 sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
795 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
796 msg_pl->sg.data);
797 }
798
799 i = msg_pl->sg.start;
800 sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
801
802 i = msg_en->sg.end;
803 sk_msg_iter_var_prev(i);
804 sg_mark_end(sk_msg_elem(msg_en, i));
805
806 i = msg_en->sg.start;
807 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
808
809 tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
810 tls_ctx->tx.rec_seq, record_type, prot);
811
812 tls_fill_prepend(tls_ctx,
813 page_address(sg_page(&msg_en->sg.data[i])) +
814 msg_en->sg.data[i].offset,
815 msg_pl->sg.size + prot->tail_size,
816 record_type);
817
818 tls_ctx->pending_open_record_frags = false;
819
820 rc = tls_do_encryption(sk, tls_ctx, ctx, req,
821 msg_pl->sg.size + prot->tail_size, i);
822 if (rc < 0) {
823 if (rc != -EINPROGRESS) {
824 tls_err_abort(sk, -EBADMSG);
825 if (split) {
826 tls_ctx->pending_open_record_frags = true;
827 tls_merge_open_record(sk, rec, tmp, orig_end);
828 }
829 }
830 ctx->async_capable = 1;
831 return rc;
832 } else if (split) {
833 msg_pl = &tmp->msg_plaintext;
834 msg_en = &tmp->msg_encrypted;
835 sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
836 tls_ctx->pending_open_record_frags = true;
837 ctx->open_rec = tmp;
838 }
839
840 return tls_tx_records(sk, flags);
841 }
842
843 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
844 bool full_record, u8 record_type,
845 ssize_t *copied, int flags)
846 {
847 struct tls_context *tls_ctx = tls_get_ctx(sk);
848 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
849 struct sk_msg msg_redir = { };
850 struct sk_psock *psock;
851 struct sock *sk_redir;
852 struct tls_rec *rec;
853 bool enospc, policy, redir_ingress;
854 int err = 0, send;
855 u32 delta = 0;
856
857 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
858 psock = sk_psock_get(sk);
859 if (!psock || !policy) {
860 err = tls_push_record(sk, flags, record_type);
861 if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
862 *copied -= sk_msg_free(sk, msg);
863 tls_free_open_rec(sk);
864 err = -sk->sk_err;
865 }
866 if (psock)
867 sk_psock_put(sk, psock);
868 return err;
869 }
870 more_data:
871 enospc = sk_msg_full(msg);
872 if (psock->eval == __SK_NONE) {
873 delta = msg->sg.size;
874 psock->eval = sk_psock_msg_verdict(sk, psock, msg);
875 delta -= msg->sg.size;
876
877 if ((s32)delta > 0) {
878 /* It indicates that we executed bpf_msg_pop_data(),
879 * causing the plaintext data size to decrease.
880 * Therefore the encrypted data size also needs to
881 * correspondingly decrease. We only need to subtract
882 * delta to calculate the new ciphertext length since
883 * ktls does not support block encryption.
884 */
885 struct sk_msg *enc = &ctx->open_rec->msg_encrypted;
886
887 sk_msg_trim(sk, enc, enc->sg.size - delta);
888 }
889 }
890 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
891 !enospc && !full_record) {
892 err = -ENOSPC;
893 goto out_err;
894 }
895 msg->cork_bytes = 0;
896 send = msg->sg.size;
897 if (msg->apply_bytes && msg->apply_bytes < send)
898 send = msg->apply_bytes;
899
900 switch (psock->eval) {
901 case __SK_PASS:
902 err = tls_push_record(sk, flags, record_type);
903 if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
904 *copied -= sk_msg_free(sk, msg);
905 tls_free_open_rec(sk);
906 err = -sk->sk_err;
907 goto out_err;
908 }
909 break;
910 case __SK_REDIRECT:
911 redir_ingress = psock->redir_ingress;
912 sk_redir = psock->sk_redir;
913 memcpy(&msg_redir, msg, sizeof(*msg));
914 if (msg->apply_bytes < send)
915 msg->apply_bytes = 0;
916 else
917 msg->apply_bytes -= send;
918 sk_msg_return_zero(sk, msg, send);
919 msg->sg.size -= send;
920 release_sock(sk);
921 err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
922 &msg_redir, send, flags);
923 lock_sock(sk);
924 if (err < 0) {
925 /* Regardless of whether the data represented by
926 * msg_redir is sent successfully, we have already
927 * uncharged it via sk_msg_return_zero(). The
928 * msg->sg.size represents the remaining unprocessed
929 * data, which needs to be uncharged here.
930 */
931 sk_mem_uncharge(sk, msg->sg.size);
932 *copied -= sk_msg_free_nocharge(sk, &msg_redir);
933 msg->sg.size = 0;
934 }
935 if (msg->sg.size == 0)
936 tls_free_open_rec(sk);
937 break;
938 case __SK_DROP:
939 default:
940 sk_msg_free_partial(sk, msg, send);
941 if (msg->apply_bytes < send)
942 msg->apply_bytes = 0;
943 else
944 msg->apply_bytes -= send;
945 if (msg->sg.size == 0)
946 tls_free_open_rec(sk);
947 *copied -= (send + delta);
948 err = -EACCES;
949 }
950
951 if (likely(!err)) {
952 bool reset_eval = !ctx->open_rec;
953
954 rec = ctx->open_rec;
955 if (rec) {
956 msg = &rec->msg_plaintext;
957 if (!msg->apply_bytes)
958 reset_eval = true;
959 }
960 if (reset_eval) {
961 psock->eval = __SK_NONE;
962 if (psock->sk_redir) {
963 sock_put(psock->sk_redir);
964 psock->sk_redir = NULL;
965 }
966 }
967 if (rec)
968 goto more_data;
969 }
970 out_err:
971 sk_psock_put(sk, psock);
972 return err;
973 }
974
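/* Push the currently open record, if any, through the BPF verdict and on to
 * the transmit path.
 */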
975 static int tls_sw_push_pending_record(struct sock *sk, int flags)
976 {
977 struct tls_context *tls_ctx = tls_get_ctx(sk);
978 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
979 struct tls_rec *rec = ctx->open_rec;
980 struct sk_msg *msg_pl;
981 size_t copied;
982
983 if (!rec)
984 return 0;
985
986 msg_pl = &rec->msg_plaintext;
987 copied = msg_pl->sg.size;
988 if (!copied)
989 return 0;
990
991 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
992 &copied, flags);
993 }
994
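/* MSG_SPLICE_PAGES: take page references out of the iterator and attach them
 * to the plaintext message instead of copying the data.
 */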
995 static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
996 struct sk_msg *msg_pl, size_t try_to_copy,
997 ssize_t *copied)
998 {
999 struct page *page = NULL, **pages = &page;
1000
1001 do {
1002 ssize_t part;
1003 size_t off;
1004
1005 part = iov_iter_extract_pages(&msg->msg_iter, &pages,
1006 try_to_copy, 1, 0, &off);
1007 if (part <= 0)
1008 return part ?: -EIO;
1009
1010 if (WARN_ON_ONCE(!sendpage_ok(page))) {
1011 iov_iter_revert(&msg->msg_iter, part);
1012 return -EIO;
1013 }
1014
1015 sk_msg_page_add(msg_pl, page, part, off);
1016 msg_pl->sg.copybreak = 0;
1017 msg_pl->sg.curr = msg_pl->sg.end;
1018 sk_mem_charge(sk, part);
1019 *copied += part;
1020 try_to_copy -= part;
1021 } while (try_to_copy && !sk_msg_full(msg_pl));
1022
1023 return 0;
1024 }
1025
1026 static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
1027 size_t size)
1028 {
1029 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1030 struct tls_context *tls_ctx = tls_get_ctx(sk);
1031 struct tls_prot_info *prot = &tls_ctx->prot_info;
1032 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1033 bool async_capable = ctx->async_capable;
1034 unsigned char record_type = TLS_RECORD_TYPE_DATA;
1035 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1036 bool eor = !(msg->msg_flags & MSG_MORE);
1037 size_t try_to_copy;
1038 ssize_t copied = 0;
1039 struct sk_msg *msg_pl, *msg_en;
1040 struct tls_rec *rec;
1041 int required_size;
1042 int num_async = 0;
1043 bool full_record;
1044 int record_room;
1045 int num_zc = 0;
1046 int orig_size;
1047 int ret = 0;
1048
1049 if (!eor && (msg->msg_flags & MSG_EOR))
1050 return -EINVAL;
1051
1052 if (unlikely(msg->msg_controllen)) {
1053 ret = tls_process_cmsg(sk, msg, &record_type);
1054 if (ret) {
1055 if (ret == -EINPROGRESS)
1056 num_async++;
1057 else if (ret != -EAGAIN)
1058 goto end;
1059 }
1060 }
1061
1062 while (msg_data_left(msg)) {
1063 if (sk->sk_err) {
1064 ret = -sk->sk_err;
1065 goto send_end;
1066 }
1067
1068 if (ctx->open_rec)
1069 rec = ctx->open_rec;
1070 else
1071 rec = ctx->open_rec = tls_get_rec(sk);
1072 if (!rec) {
1073 ret = -ENOMEM;
1074 goto send_end;
1075 }
1076
1077 msg_pl = &rec->msg_plaintext;
1078 msg_en = &rec->msg_encrypted;
1079
1080 orig_size = msg_pl->sg.size;
1081 full_record = false;
1082 try_to_copy = msg_data_left(msg);
1083 record_room = tls_ctx->tx_max_payload_len - msg_pl->sg.size;
1084 if (try_to_copy >= record_room) {
1085 try_to_copy = record_room;
1086 full_record = true;
1087 }
1088
1089 required_size = msg_pl->sg.size + try_to_copy +
1090 prot->overhead_size;
1091
1092 if (!sk_stream_memory_free(sk))
1093 goto wait_for_sndbuf;
1094
1095 alloc_encrypted:
1096 ret = tls_alloc_encrypted_msg(sk, required_size);
1097 if (ret) {
1098 if (ret != -ENOSPC)
1099 goto wait_for_memory;
1100
1101 /* Adjust try_to_copy according to the amount that was
1102 * actually allocated. The difference is due
1103 * to max sg elements limit
1104 */
1105 try_to_copy -= required_size - msg_en->sg.size;
1106 full_record = true;
1107 }
1108
1109 if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
1110 ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
1111 try_to_copy, &copied);
1112 if (ret < 0)
1113 goto send_end;
1114 tls_ctx->pending_open_record_frags = true;
1115
1116 if (sk_msg_full(msg_pl)) {
1117 full_record = true;
1118 sk_msg_trim(sk, msg_en,
1119 msg_pl->sg.size + prot->overhead_size);
1120 }
1121
1122 if (full_record || eor)
1123 goto copied;
1124 continue;
1125 }
1126
1127 if (!is_kvec && (full_record || eor) && !async_capable) {
1128 u32 first = msg_pl->sg.end;
1129
1130 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1131 msg_pl, try_to_copy);
1132 if (ret)
1133 goto fallback_to_reg_send;
1134
1135 num_zc++;
1136 copied += try_to_copy;
1137
1138 sk_msg_sg_copy_set(msg_pl, first);
1139 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1140 record_type, &copied,
1141 msg->msg_flags);
1142 if (ret) {
1143 if (ret == -EINPROGRESS)
1144 num_async++;
1145 else if (ret == -ENOMEM)
1146 goto wait_for_memory;
1147 else if (ctx->open_rec && ret == -ENOSPC) {
1148 if (msg_pl->cork_bytes) {
1149 ret = 0;
1150 goto send_end;
1151 }
1152 goto rollback_iter;
1153 } else if (ret != -EAGAIN)
1154 goto send_end;
1155 }
1156
1157 /* Transmit if any encryptions have completed */
1158 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1159 cancel_delayed_work(&ctx->tx_work.work);
1160 tls_tx_records(sk, msg->msg_flags);
1161 }
1162
1163 continue;
1164 rollback_iter:
1165 copied -= try_to_copy;
1166 sk_msg_sg_copy_clear(msg_pl, first);
1167 iov_iter_revert(&msg->msg_iter,
1168 msg_pl->sg.size - orig_size);
1169 fallback_to_reg_send:
1170 sk_msg_trim(sk, msg_pl, orig_size);
1171 }
1172
1173 required_size = msg_pl->sg.size + try_to_copy;
1174
1175 ret = tls_clone_plaintext_msg(sk, required_size);
1176 if (ret) {
1177 if (ret != -ENOSPC)
1178 goto send_end;
1179
1180 /* Adjust try_to_copy according to the amount that was
1181 * actually allocated. The difference is due
1182 * to max sg elements limit
1183 */
1184 try_to_copy -= required_size - msg_pl->sg.size;
1185 full_record = true;
1186 sk_msg_trim(sk, msg_en,
1187 msg_pl->sg.size + prot->overhead_size);
1188 }
1189
1190 if (try_to_copy) {
1191 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1192 msg_pl, try_to_copy);
1193 if (ret < 0)
1194 goto trim_sgl;
1195 }
1196
1197 /* Open records defined only if successfully copied, otherwise
1198 * we would trim the sg but not reset the open record frags.
1199 */
1200 tls_ctx->pending_open_record_frags = true;
1201 copied += try_to_copy;
1202 copied:
1203 if (full_record || eor) {
1204 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1205 record_type, &copied,
1206 msg->msg_flags);
1207 if (ret) {
1208 if (ret == -EINPROGRESS)
1209 num_async++;
1210 else if (ret == -ENOMEM)
1211 goto wait_for_memory;
1212 else if (ret != -EAGAIN) {
1213 if (ret == -ENOSPC)
1214 ret = 0;
1215 goto send_end;
1216 }
1217 }
1218
1219 /* Transmit if any encryptions have completed */
1220 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1221 cancel_delayed_work(&ctx->tx_work.work);
1222 tls_tx_records(sk, msg->msg_flags);
1223 }
1224 }
1225
1226 continue;
1227
1228 wait_for_sndbuf:
1229 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1230 wait_for_memory:
1231 ret = sk_stream_wait_memory(sk, &timeo);
1232 if (ret) {
1233 trim_sgl:
1234 if (ctx->open_rec)
1235 tls_trim_both_msgs(sk, orig_size);
1236 goto send_end;
1237 }
1238
1239 if (ctx->open_rec && msg_en->sg.size < required_size)
1240 goto alloc_encrypted;
1241 }
1242
1243 send_end:
1244 if (!num_async) {
1245 goto end;
1246 } else if (num_zc || eor) {
1247 int err;
1248
1249 /* Wait for pending encryptions to get completed */
1250 err = tls_encrypt_async_wait(ctx);
1251 if (err) {
1252 ret = err;
1253 copied = 0;
1254 }
1255 }
1256
1257 /* Transmit if any encryptions have completed */
1258 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1259 cancel_delayed_work(&ctx->tx_work.work);
1260 tls_tx_records(sk, msg->msg_flags);
1261 }
1262
1263 end:
1264 ret = sk_stream_error(sk, msg->msg_flags, ret);
1265 return copied > 0 ? copied : ret;
1266 }
1267
1268 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1269 {
1270 struct tls_context *tls_ctx = tls_get_ctx(sk);
1271 int ret;
1272
1273 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1274 MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
1275 MSG_SENDPAGE_NOPOLICY))
1276 return -EOPNOTSUPP;
1277
1278 ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
1279 if (ret)
1280 return ret;
1281 lock_sock(sk);
1282 ret = tls_sw_sendmsg_locked(sk, msg, size);
1283 release_sock(sk);
1284 mutex_unlock(&tls_ctx->tx_lock);
1285 return ret;
1286 }
1287
1288 /*
1289 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
1290 */
1291 void tls_sw_splice_eof(struct socket *sock)
1292 {
1293 struct sock *sk = sock->sk;
1294 struct tls_context *tls_ctx = tls_get_ctx(sk);
1295 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1296 struct tls_rec *rec;
1297 struct sk_msg *msg_pl;
1298 ssize_t copied = 0;
1299 bool retrying = false;
1300 int ret = 0;
1301
1302 if (!ctx->open_rec)
1303 return;
1304
1305 mutex_lock(&tls_ctx->tx_lock);
1306 lock_sock(sk);
1307
1308 retry:
1309 /* same checks as in tls_sw_push_pending_record() */
1310 rec = ctx->open_rec;
1311 if (!rec)
1312 goto unlock;
1313
1314 msg_pl = &rec->msg_plaintext;
1315 if (msg_pl->sg.size == 0)
1316 goto unlock;
1317
1318 /* Check the BPF advisor and perform transmission. */
1319 ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
1320 &copied, 0);
1321 switch (ret) {
1322 case 0:
1323 case -EAGAIN:
1324 if (retrying)
1325 goto unlock;
1326 retrying = true;
1327 goto retry;
1328 case -EINPROGRESS:
1329 break;
1330 default:
1331 goto unlock;
1332 }
1333
1334 /* Wait for pending encryptions to get completed */
1335 if (tls_encrypt_async_wait(ctx))
1336 goto unlock;
1337
1338 /* Transmit if any encryptions have completed */
1339 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1340 cancel_delayed_work(&ctx->tx_work.work);
1341 tls_tx_records(sk, 0);
1342 }
1343
1344 unlock:
1345 release_sock(sk);
1346 mutex_unlock(&tls_ctx->tx_lock);
1347 }
1348
1349 static int
1350 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
1351 bool released)
1352 {
1353 struct tls_context *tls_ctx = tls_get_ctx(sk);
1354 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1355 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1356 int ret = 0;
1357 long timeo;
1358
1359 /* a rekey is pending, let userspace deal with it */
1360 if (unlikely(ctx->key_update_pending))
1361 return -EKEYEXPIRED;
1362
1363 timeo = sock_rcvtimeo(sk, nonblock);
1364
1365 while (!tls_strp_msg_ready(ctx)) {
1366 if (!sk_psock_queue_empty(psock))
1367 return 0;
1368
1369 if (sk->sk_err)
1370 return sock_error(sk);
1371
1372 if (ret < 0)
1373 return ret;
1374
1375 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1376 tls_strp_check_rcv(&ctx->strp);
1377 if (tls_strp_msg_ready(ctx))
1378 break;
1379 }
1380
1381 if (sk->sk_shutdown & RCV_SHUTDOWN)
1382 return 0;
1383
1384 if (sock_flag(sk, SOCK_DONE))
1385 return 0;
1386
1387 if (!timeo)
1388 return -EAGAIN;
1389
1390 released = true;
1391 add_wait_queue(sk_sleep(sk), &wait);
1392 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1393 ret = sk_wait_event(sk, &timeo,
1394 tls_strp_msg_ready(ctx) ||
1395 !sk_psock_queue_empty(psock),
1396 &wait);
1397 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1398 remove_wait_queue(sk_sleep(sk), &wait);
1399
1400 /* Handle signals */
1401 if (signal_pending(current))
1402 return sock_intr_errno(timeo);
1403 }
1404
1405 if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
1406 return tls_rx_rec_wait(sk, psock, nonblock, false);
1407
1408 return 1;
1409 }
1410
1411 static int tls_setup_from_iter(struct iov_iter *from,
1412 int length, int *pages_used,
1413 struct scatterlist *to,
1414 int to_max_pages)
1415 {
1416 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1417 struct page *pages[MAX_SKB_FRAGS];
1418 unsigned int size = 0;
1419 ssize_t copied, use;
1420 size_t offset;
1421
1422 while (length > 0) {
1423 i = 0;
1424 maxpages = to_max_pages - num_elem;
1425 if (maxpages == 0) {
1426 rc = -EFAULT;
1427 goto out;
1428 }
1429 copied = iov_iter_get_pages2(from, pages,
1430 length,
1431 maxpages, &offset);
1432 if (copied <= 0) {
1433 rc = -EFAULT;
1434 goto out;
1435 }
1436
1437 length -= copied;
1438 size += copied;
1439 while (copied) {
1440 use = min_t(int, copied, PAGE_SIZE - offset);
1441
1442 sg_set_page(&to[num_elem],
1443 pages[i], use, offset);
1444 sg_unmark_end(&to[num_elem]);
1445 /* We do not uncharge memory from this API */
1446
1447 offset = 0;
1448 copied -= use;
1449
1450 i++;
1451 num_elem++;
1452 }
1453 }
1454 /* Mark the end in the last sg entry if newly added */
1455 if (num_elem > *pages_used)
1456 sg_mark_end(&to[num_elem - 1]);
1457 out:
1458 if (rc)
1459 iov_iter_revert(from, size);
1460 *pages_used = num_elem;
1461
1462 return rc;
1463 }
1464
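/* Allocate a frag-backed skb big enough to receive the cleartext of a record
 * that is decrypted out of place.
 */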
1465 static struct sk_buff *
1466 tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
1467 unsigned int full_len)
1468 {
1469 struct strp_msg *clr_rxm;
1470 struct sk_buff *clr_skb;
1471 int err;
1472
1473 clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
1474 &err, sk->sk_allocation);
1475 if (!clr_skb)
1476 return NULL;
1477
1478 skb_copy_header(clr_skb, skb);
1479 clr_skb->len = full_len;
1480 clr_skb->data_len = full_len;
1481
1482 clr_rxm = strp_msg(clr_skb);
1483 clr_rxm->offset = 0;
1484
1485 return clr_skb;
1486 }
1487
1488 /* Decrypt handlers
1489 *
1490 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
1491 * They must transform the darg in/out argument as follows:
1492 * | Input | Output
1493 * -------------------------------------------------------------------
1494 * zc | Zero-copy decrypt allowed | Zero-copy performed
1495 * async | Async decrypt allowed | Async crypto used / in progress
1496 * skb | * | Output skb
1497 *
1498 * If ZC decryption was performed darg.skb will point to the input skb.
1499 */
1500
1501 /* This function decrypts the input skb into out_iov, into out_sg, or into
1502 * the skb's own buffers. The input parameter 'darg->zc' indicates whether
1503 * zero-copy mode should be tried. With zero-copy mode, either
1504 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
1505 * NULL, then the decryption happens inside skb buffers itself, i.e.
1506 * zero-copy gets disabled and 'darg->zc' is updated.
1507 */
1508 static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
1509 struct scatterlist *out_sg,
1510 struct tls_decrypt_arg *darg)
1511 {
1512 struct tls_context *tls_ctx = tls_get_ctx(sk);
1513 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1514 struct tls_prot_info *prot = &tls_ctx->prot_info;
1515 int n_sgin, n_sgout, aead_size, err, pages = 0;
1516 struct sk_buff *skb = tls_strp_msg(ctx);
1517 const struct strp_msg *rxm = strp_msg(skb);
1518 const struct tls_msg *tlm = tls_msg(skb);
1519 struct aead_request *aead_req;
1520 struct scatterlist *sgin = NULL;
1521 struct scatterlist *sgout = NULL;
1522 const int data_len = rxm->full_len - prot->overhead_size;
1523 int tail_pages = !!prot->tail_size;
1524 struct tls_decrypt_ctx *dctx;
1525 struct sk_buff *clear_skb;
1526 int iv_offset = 0;
1527 u8 *mem;
1528
1529 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1530 rxm->full_len - prot->prepend_size);
1531 if (n_sgin < 1)
1532 return n_sgin ?: -EBADMSG;
1533
1534 if (darg->zc && (out_iov || out_sg)) {
1535 clear_skb = NULL;
1536
1537 if (out_iov)
1538 n_sgout = 1 + tail_pages +
1539 iov_iter_npages_cap(out_iov, INT_MAX, data_len);
1540 else
1541 n_sgout = sg_nents(out_sg);
1542 } else {
1543 darg->zc = false;
1544
1545 clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
1546 if (!clear_skb)
1547 return -ENOMEM;
1548
1549 n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
1550 }
1551
1552 /* Increment to accommodate AAD */
1553 n_sgin = n_sgin + 1;
1554
1555 /* Allocate a single block of memory which contains
1556 * aead_req || tls_decrypt_ctx.
1557 * Both structs are variable length.
1558 */
1559 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1560 aead_size = ALIGN(aead_size, __alignof__(*dctx));
1561 mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
1562 sk->sk_allocation);
1563 if (!mem) {
1564 err = -ENOMEM;
1565 goto exit_free_skb;
1566 }
1567
1568 /* Segment the allocated memory */
1569 aead_req = (struct aead_request *)mem;
1570 dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
1571 dctx->sk = sk;
1572 sgin = &dctx->sg[0];
1573 sgout = &dctx->sg[n_sgin];
1574
1575 /* For CCM based ciphers, first byte of nonce+iv is a constant */
1576 switch (prot->cipher_type) {
1577 case TLS_CIPHER_AES_CCM_128:
1578 dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1579 iv_offset = 1;
1580 break;
1581 case TLS_CIPHER_SM4_CCM:
1582 dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1583 iv_offset = 1;
1584 break;
1585 }
1586
1587 /* Prepare IV */
1588 if (prot->version == TLS_1_3_VERSION ||
1589 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
1590 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
1591 prot->iv_size + prot->salt_size);
1592 } else {
1593 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1594 &dctx->iv[iv_offset] + prot->salt_size,
1595 prot->iv_size);
1596 if (err < 0)
1597 goto exit_free;
1598 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
1599 }
1600 tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
1601
1602 /* Prepare AAD */
1603 tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
1604 prot->tail_size,
1605 tls_ctx->rx.rec_seq, tlm->control, prot);
1606
1607 /* Prepare sgin */
1608 sg_init_table(sgin, n_sgin);
1609 sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
1610 err = skb_to_sgvec(skb, &sgin[1],
1611 rxm->offset + prot->prepend_size,
1612 rxm->full_len - prot->prepend_size);
1613 if (err < 0)
1614 goto exit_free;
1615
1616 if (clear_skb) {
1617 sg_init_table(sgout, n_sgout);
1618 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1619
1620 err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
1621 data_len + prot->tail_size);
1622 if (err < 0)
1623 goto exit_free;
1624 } else if (out_iov) {
1625 sg_init_table(sgout, n_sgout);
1626 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1627
1628 err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
1629 (n_sgout - 1 - tail_pages));
1630 if (err < 0)
1631 goto exit_free_pages;
1632
1633 if (prot->tail_size) {
1634 sg_unmark_end(&sgout[pages]);
1635 sg_set_buf(&sgout[pages + 1], &dctx->tail,
1636 prot->tail_size);
1637 sg_mark_end(&sgout[pages + 1]);
1638 }
1639 } else if (out_sg) {
1640 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1641 }
1642 dctx->free_sgout = !!pages;
1643
1644 /* Prepare and submit AEAD request */
1645 err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
1646 data_len + prot->tail_size, aead_req, darg);
1647 if (err) {
1648 if (darg->async_done)
1649 goto exit_free_skb;
1650 goto exit_free_pages;
1651 }
1652
1653 darg->skb = clear_skb ?: tls_strp_msg(ctx);
1654 clear_skb = NULL;
1655
1656 if (unlikely(darg->async)) {
1657 err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
1658 if (err) {
1659 err = tls_decrypt_async_wait(ctx);
1660 darg->async = false;
1661 }
1662 return err;
1663 }
1664
1665 if (unlikely(darg->async_done))
1666 return 0;
1667
1668 if (prot->tail_size)
1669 darg->tail = dctx->tail;
1670
1671 exit_free_pages:
1672 /* Release the pages in case iov was mapped to pages */
1673 for (; pages > 0; pages--)
1674 put_page(sg_page(&sgout[pages]));
1675 exit_free:
1676 kfree(mem);
1677 exit_free_skb:
1678 consume_skb(clear_skb);
1679 return err;
1680 }
1681
1682 static int
1683 tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
1684 struct msghdr *msg, struct tls_decrypt_arg *darg)
1685 {
1686 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1687 struct tls_prot_info *prot = &tls_ctx->prot_info;
1688 struct strp_msg *rxm;
1689 int pad, err;
1690
1691 err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
1692 if (err < 0) {
1693 if (err == -EBADMSG)
1694 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
1695 return err;
1696 }
1697 /* keep going even for ->async, the code below is TLS 1.3 */
1698
1699 /* If opportunistic TLS 1.3 ZC failed retry without ZC */
1700 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
1701 darg->tail != TLS_RECORD_TYPE_DATA)) {
1702 darg->zc = false;
1703 if (!darg->tail)
1704 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
1705 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
1706 return tls_decrypt_sw(sk, tls_ctx, msg, darg);
1707 }
1708
1709 pad = tls_padding_length(prot, darg->skb, darg);
1710 if (pad < 0) {
1711 if (darg->skb != tls_strp_msg(ctx))
1712 consume_skb(darg->skb);
1713 return pad;
1714 }
1715
1716 rxm = strp_msg(darg->skb);
1717 rxm->full_len -= pad;
1718
1719 return 0;
1720 }
1721
1722 static int
1723 tls_decrypt_device(struct sock *sk, struct msghdr *msg,
1724 struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
1725 {
1726 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1727 struct tls_prot_info *prot = &tls_ctx->prot_info;
1728 struct strp_msg *rxm;
1729 int pad, err;
1730
1731 if (tls_ctx->rx_conf != TLS_HW)
1732 return 0;
1733
1734 err = tls_device_decrypted(sk, tls_ctx);
1735 if (err <= 0)
1736 return err;
1737
1738 pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
1739 if (pad < 0)
1740 return pad;
1741
1742 darg->async = false;
1743 darg->skb = tls_strp_msg(ctx);
1744 /* ->zc downgrade check, in case TLS 1.3 gets here */
1745 darg->zc &= !(prot->version == TLS_1_3_VERSION &&
1746 tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);
1747
1748 rxm = strp_msg(darg->skb);
1749 rxm->full_len -= pad;
1750
1751 if (!darg->zc) {
1752 /* Non-ZC case needs a real skb */
1753 darg->skb = tls_strp_msg_detach(ctx);
1754 if (!darg->skb)
1755 return -ENOMEM;
1756 } else {
1757 unsigned int off, len;
1758
1759 /* In ZC case nobody cares about the output skb.
1760 * Just copy the data here. Note the skb is not fully trimmed.
1761 */
1762 off = rxm->offset + prot->prepend_size;
1763 len = rxm->full_len - prot->overhead_size;
1764
1765 err = skb_copy_datagram_msg(darg->skb, off, msg, len);
1766 if (err)
1767 return err;
1768 }
1769 return 1;
1770 }
1771
1772 static int tls_check_pending_rekey(struct sock *sk, struct tls_context *ctx,
1773 struct sk_buff *skb)
1774 {
1775 const struct strp_msg *rxm = strp_msg(skb);
1776 const struct tls_msg *tlm = tls_msg(skb);
1777 char hs_type;
1778 int err;
1779
1780 if (likely(tlm->control != TLS_RECORD_TYPE_HANDSHAKE))
1781 return 0;
1782
1783 if (rxm->full_len < 1)
1784 return 0;
1785
1786 err = skb_copy_bits(skb, rxm->offset, &hs_type, 1);
1787 if (err < 0) {
1788 DEBUG_NET_WARN_ON_ONCE(1);
1789 return err;
1790 }
1791
1792 if (hs_type == TLS_HANDSHAKE_KEYUPDATE) {
1793 struct tls_sw_context_rx *rx_ctx = ctx->priv_ctx_rx;
1794
1795 WRITE_ONCE(rx_ctx->key_update_pending, true);
1796 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYRECEIVED);
1797 }
1798
1799 return 0;
1800 }
1801
1802 static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
1803 struct tls_decrypt_arg *darg)
1804 {
1805 struct tls_context *tls_ctx = tls_get_ctx(sk);
1806 struct tls_prot_info *prot = &tls_ctx->prot_info;
1807 struct strp_msg *rxm;
1808 int err;
1809
1810 err = tls_decrypt_device(sk, msg, tls_ctx, darg);
1811 if (!err)
1812 err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
1813 if (err < 0)
1814 return err;
1815
1816 rxm = strp_msg(darg->skb);
1817 rxm->offset += prot->prepend_size;
1818 rxm->full_len -= prot->overhead_size;
1819 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1820
1821 return tls_check_pending_rekey(sk, tls_ctx, darg->skb);
1822 }
1823
1824 int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
1825 {
1826 struct tls_decrypt_arg darg = { .zc = true, };
1827
1828 return tls_decrypt_sg(sk, NULL, sgout, &darg);
1829 }
1830
1831 /* All records returned from a recvmsg() call must have the same type.
1832 * 0 is not a valid content type. Use it as "no type reported, yet".
1833 */
1834 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
1835 u8 *control)
1836 {
1837 int err;
1838
1839 if (!*control) {
1840 *control = tlm->control;
1841 if (!*control)
1842 return -EBADMSG;
1843
1844 err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1845 sizeof(*control), control);
1846 if (*control != TLS_RECORD_TYPE_DATA) {
1847 if (err || msg->msg_flags & MSG_CTRUNC)
1848 return -EIO;
1849 }
1850 } else if (*control != tlm->control) {
1851 return 0;
1852 }
1853
1854 return 1;
1855 }
1856
1857 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
1858 {
1859 tls_strp_msg_done(&ctx->strp);
1860 }
1861
1862 /* This function traverses the rx_list in the TLS receive context and copies
1863 * the decrypted records into the buffer provided by the caller when zero copy
1864 * is not true. Further, the records are removed from the rx_list if it is not
1865 * a peek case and the record has been consumed completely.
1866 */
1867 static int process_rx_list(struct tls_sw_context_rx *ctx,
1868 struct msghdr *msg,
1869 u8 *control,
1870 size_t skip,
1871 size_t len,
1872 bool is_peek,
1873 bool *more)
1874 {
1875 struct sk_buff *skb = skb_peek(&ctx->rx_list);
1876 struct tls_msg *tlm;
1877 ssize_t copied = 0;
1878 int err;
1879
1880 while (skip && skb) {
1881 struct strp_msg *rxm = strp_msg(skb);
1882 tlm = tls_msg(skb);
1883
1884 err = tls_record_content_type(msg, tlm, control);
1885 if (err <= 0)
1886 goto more;
1887
1888 if (skip < rxm->full_len)
1889 break;
1890
1891 skip = skip - rxm->full_len;
1892 skb = skb_peek_next(skb, &ctx->rx_list);
1893 }
1894
1895 while (len && skb) {
1896 struct sk_buff *next_skb;
1897 struct strp_msg *rxm = strp_msg(skb);
1898 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1899
1900 tlm = tls_msg(skb);
1901
1902 err = tls_record_content_type(msg, tlm, control);
1903 if (err <= 0)
1904 goto more;
1905
1906 err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1907 msg, chunk);
1908 if (err < 0)
1909 goto more;
1910
1911 len = len - chunk;
1912 copied = copied + chunk;
1913
1914 /* Consume the data from the record in the non-peek case */
1915 if (!is_peek) {
1916 rxm->offset = rxm->offset + chunk;
1917 rxm->full_len = rxm->full_len - chunk;
1918
1919 /* Return if there is unconsumed data in the record */
1920 if (rxm->full_len - skip)
1921 break;
1922 }
1923
1924 /* The remaining skip-bytes must lie in 1st record in rx_list.
1925 * So from the 2nd record, 'skip' should be 0.
1926 */
1927 skip = 0;
1928
1929 if (msg)
1930 msg->msg_flags |= MSG_EOR;
1931
1932 next_skb = skb_peek_next(skb, &ctx->rx_list);
1933
1934 if (!is_peek) {
1935 __skb_unlink(skb, &ctx->rx_list);
1936 consume_skb(skb);
1937 }
1938
1939 skb = next_skb;
1940 }
1941 err = 0;
1942
1943 out:
1944 return copied ? : err;
1945 more:
1946 if (more)
1947 *more = true;
1948 goto out;
1949 }
1950
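/* Periodically flush the socket backlog while reading so that the TCP layer
 * can keep making forward progress. Flushing is skipped when the data already
 * decrypted covers the rest of the request, or when less than 128K has been
 * read since the last flush while a full record is still queued in TCP.
 */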
1951 static bool
1952 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1953 size_t len_left, size_t decrypted, ssize_t done,
1954 size_t *flushed_at)
1955 {
1956 size_t max_rec;
1957
1958 if (len_left <= decrypted)
1959 return false;
1960
1961 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1962 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
1963 return false;
1964
1965 *flushed_at = done;
1966 return sk_flush_backlog(sk);
1967 }
1968
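/* Only one reader may own the RX path at a time. Wait (honouring the socket
 * receive timeout and pending signals) until any previous reader has left,
 * then mark ourselves as the present reader.
 */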
1969 static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
1970 bool nonblock)
1971 {
1972 long timeo;
1973 int ret;
1974
1975 timeo = sock_rcvtimeo(sk, nonblock);
1976
1977 while (unlikely(ctx->reader_present)) {
1978 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1979
1980 ctx->reader_contended = 1;
1981
1982 add_wait_queue(&ctx->wq, &wait);
1983 ret = sk_wait_event(sk, &timeo,
1984 !READ_ONCE(ctx->reader_present), &wait);
1985 remove_wait_queue(&ctx->wq, &wait);
1986
1987 if (timeo <= 0)
1988 return -EAGAIN;
1989 if (signal_pending(current))
1990 return sock_intr_errno(timeo);
1991 if (ret < 0)
1992 return ret;
1993 }
1994
1995 WRITE_ONCE(ctx->reader_present, 1);
1996
1997 return 0;
1998 }
1999
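/* Take the socket lock and become the exclusive RX reader; the lock is
 * dropped again on failure.
 */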
2000 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
2001 bool nonblock)
2002 {
2003 int err;
2004
2005 lock_sock(sk);
2006 err = tls_rx_reader_acquire(sk, ctx, nonblock);
2007 if (err)
2008 release_sock(sk);
2009 return err;
2010 }
2011
2012 static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
2013 {
2014 if (unlikely(ctx->reader_contended)) {
2015 if (wq_has_sleeper(&ctx->wq))
2016 wake_up(&ctx->wq);
2017 else
2018 ctx->reader_contended = 0;
2019
2020 WARN_ON_ONCE(!ctx->reader_present);
2021 }
2022
2023 WRITE_ONCE(ctx->reader_present, 0);
2024 }
2025
2026 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
2027 {
2028 tls_rx_reader_release(sk, ctx);
2029 release_sock(sk);
2030 }
2031
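/* Receive decrypted TLS records into @msg. Previously decrypted records are
 * drained from rx_list first, then new records are pulled from the strparser
 * and decrypted, zero-copy directly into the user buffer when possible and
 * falling back to copying otherwise. Control (non-data) records are reported
 * through a TLS_GET_RECORD_TYPE cmsg and are never mixed with data records
 * in a single call.
 */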
2032 int tls_sw_recvmsg(struct sock *sk,
2033 struct msghdr *msg,
2034 size_t len,
2035 int flags,
2036 int *addr_len)
2037 {
2038 struct tls_context *tls_ctx = tls_get_ctx(sk);
2039 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2040 struct tls_prot_info *prot = &tls_ctx->prot_info;
2041 ssize_t decrypted = 0, async_copy_bytes = 0;
2042 struct sk_psock *psock;
2043 unsigned char control = 0;
2044 size_t flushed_at = 0;
2045 struct strp_msg *rxm;
2046 struct tls_msg *tlm;
2047 ssize_t copied = 0;
2048 ssize_t peeked = 0;
2049 bool async = false;
2050 int target, err;
2051 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
2052 bool is_peek = flags & MSG_PEEK;
2053 bool rx_more = false;
2054 bool released = true;
2055 bool bpf_strp_enabled;
2056 bool zc_capable;
2057
2058 if (unlikely(flags & MSG_ERRQUEUE))
2059 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
2060
2061 err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
2062 if (err < 0)
2063 return err;
2064 psock = sk_psock_get(sk);
2065 bpf_strp_enabled = sk_psock_strp_enabled(psock);
2066
2067 /* If crypto failed the connection is broken */
2068 err = ctx->async_wait.err;
2069 if (err)
2070 goto end;
2071
2072 /* Process pending decrypted records. These must be non-zero-copy. */
2073 err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
2074 if (err < 0)
2075 goto end;
2076
2077 /* process_rx_list() will set @control if it processed any records */
2078 copied = err;
2079 if (len <= copied || rx_more ||
2080 (control && control != TLS_RECORD_TYPE_DATA))
2081 goto end;
2082
2083 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2084 len = len - copied;
2085
2086 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
2087 ctx->zc_capable;
2088 decrypted = 0;
2089 while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
2090 struct tls_decrypt_arg darg;
2091 int to_decrypt, chunk;
2092
2093 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
2094 released);
2095 if (err <= 0) {
2096 if (psock) {
2097 chunk = sk_msg_recvmsg(sk, psock, msg, len,
2098 flags);
2099 if (chunk > 0) {
2100 decrypted += chunk;
2101 len -= chunk;
2102 continue;
2103 }
2104 }
2105 goto recv_end;
2106 }
2107
2108 memset(&darg.inargs, 0, sizeof(darg.inargs));
2109
2110 rxm = strp_msg(tls_strp_msg(ctx));
2111 tlm = tls_msg(tls_strp_msg(ctx));
2112
2113 to_decrypt = rxm->full_len - prot->overhead_size;
2114
2115 if (zc_capable && to_decrypt <= len &&
2116 tlm->control == TLS_RECORD_TYPE_DATA)
2117 darg.zc = true;
2118
2119 /* Do not use async mode if record is non-data */
2120 if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
2121 darg.async = ctx->async_capable;
2122 else
2123 darg.async = false;
2124
2125 err = tls_rx_one_record(sk, msg, &darg);
2126 if (err < 0) {
2127 tls_err_abort(sk, -EBADMSG);
2128 goto recv_end;
2129 }
2130
2131 async |= darg.async;
2132
2133 /* If the type of records being processed is not known yet,
2134  * set it to the record type just dequeued. If it is already known,
2135  * but does not match the record type just dequeued, go to the end.
2136  * We always get the record type here since for TLS 1.2 the record type
2137  * is known as soon as the record is dequeued from the stream parser.
2138  * For TLS 1.3, we disable async.
2139  */
2140 err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2141 if (err <= 0) {
2142 DEBUG_NET_WARN_ON_ONCE(darg.zc);
2143 tls_rx_rec_done(ctx);
2144 put_on_rx_list_err:
2145 __skb_queue_tail(&ctx->rx_list, darg.skb);
2146 goto recv_end;
2147 }
2148
2149 /* periodically flush backlog, and feed strparser */
2150 released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
2151 decrypted + copied,
2152 &flushed_at);
2153
2154 /* TLS 1.3 may have updated the length by more than overhead */
2155 rxm = strp_msg(darg.skb);
2156 chunk = rxm->full_len;
2157 tls_rx_rec_done(ctx);
2158
2159 if (!darg.zc) {
2160 bool partially_consumed = chunk > len;
2161 struct sk_buff *skb = darg.skb;
2162
2163 DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2164
2165 if (async) {
2166 /* TLS 1.2-only, to_decrypt must be text len */
2167 chunk = min_t(int, to_decrypt, len);
2168 async_copy_bytes += chunk;
2169 put_on_rx_list:
2170 decrypted += chunk;
2171 len -= chunk;
2172 __skb_queue_tail(&ctx->rx_list, skb);
2173 if (unlikely(control != TLS_RECORD_TYPE_DATA))
2174 break;
2175 continue;
2176 }
2177
2178 if (bpf_strp_enabled) {
2179 released = true;
2180 err = sk_psock_tls_strp_read(psock, skb);
2181 if (err != __SK_PASS) {
2182 rxm->offset = rxm->offset + rxm->full_len;
2183 rxm->full_len = 0;
2184 if (err == __SK_DROP)
2185 consume_skb(skb);
2186 continue;
2187 }
2188 }
2189
2190 if (partially_consumed)
2191 chunk = len;
2192
2193 err = skb_copy_datagram_msg(skb, rxm->offset,
2194 msg, chunk);
2195 if (err < 0)
2196 goto put_on_rx_list_err;
2197
2198 if (is_peek) {
2199 peeked += chunk;
2200 goto put_on_rx_list;
2201 }
2202
2203 if (partially_consumed) {
2204 rxm->offset += chunk;
2205 rxm->full_len -= chunk;
2206 goto put_on_rx_list;
2207 }
2208
2209 consume_skb(skb);
2210 }
2211
2212 decrypted += chunk;
2213 len -= chunk;
2214
2215 /* Return full control message to userspace before trying
2216 * to parse another message type
2217 */
2218 msg->msg_flags |= MSG_EOR;
2219 if (control != TLS_RECORD_TYPE_DATA)
2220 break;
2221 }
2222
2223 recv_end:
2224 if (async) {
2225 int ret;
2226
2227 /* Wait for all previously submitted records to be decrypted */
2228 ret = tls_decrypt_async_wait(ctx);
2229
2230 if (ret) {
2231 if (err >= 0 || err == -EINPROGRESS)
2232 err = ret;
2233 goto end;
2234 }
2235
2236 /* Drain records from the rx_list & copy if required */
2237 if (is_peek)
2238 err = process_rx_list(ctx, msg, &control, copied + peeked,
2239 decrypted - peeked, is_peek, NULL);
2240 else
2241 err = process_rx_list(ctx, msg, &control, 0,
2242 async_copy_bytes, is_peek, NULL);
2243
2244 /* we could have copied less than we wanted, and possibly nothing */
2245 decrypted += max(err, 0) - async_copy_bytes;
2246 }
2247
2248 copied += decrypted;
2249
2250 end:
2251 tls_rx_reader_unlock(sk, ctx);
2252 if (psock)
2253 sk_psock_put(sk, psock);
2254 return copied ? : err;
2255 }
2256
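/* splice() support: pick up an already-decrypted record from rx_list, or
 * decrypt the next one, and feed its payload into @pipe. Control records
 * cannot be spliced and fail with -EINVAL; partially consumed records are
 * put back at the head of rx_list.
 */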
2257 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
2258 struct pipe_inode_info *pipe,
2259 size_t len, unsigned int flags)
2260 {
2261 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2262 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2263 struct strp_msg *rxm = NULL;
2264 struct sock *sk = sock->sk;
2265 struct tls_msg *tlm;
2266 struct sk_buff *skb;
2267 ssize_t copied = 0;
2268 int chunk;
2269 int err;
2270
2271 err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
2272 if (err < 0)
2273 return err;
2274
2275 if (!skb_queue_empty(&ctx->rx_list)) {
2276 skb = __skb_dequeue(&ctx->rx_list);
2277 } else {
2278 struct tls_decrypt_arg darg;
2279
2280 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
2281 true);
2282 if (err <= 0)
2283 goto splice_read_end;
2284
2285 memset(&darg.inargs, 0, sizeof(darg.inargs));
2286
2287 err = tls_rx_one_record(sk, NULL, &darg);
2288 if (err < 0) {
2289 tls_err_abort(sk, -EBADMSG);
2290 goto splice_read_end;
2291 }
2292
2293 tls_rx_rec_done(ctx);
2294 skb = darg.skb;
2295 }
2296
2297 rxm = strp_msg(skb);
2298 tlm = tls_msg(skb);
2299
2300 /* splice does not support reading control messages */
2301 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2302 err = -EINVAL;
2303 goto splice_requeue;
2304 }
2305
2306 chunk = min_t(unsigned int, rxm->full_len, len);
2307 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2308 if (copied < 0)
2309 goto splice_requeue;
2310
2311 if (chunk < rxm->full_len) {
2312 rxm->offset += len;
2313 rxm->full_len -= len;
2314 goto splice_requeue;
2315 }
2316
2317 consume_skb(skb);
2318
2319 splice_read_end:
2320 tls_rx_reader_unlock(sk, ctx);
2321 return copied ? : err;
2322
2323 splice_requeue:
2324 __skb_queue_head(&ctx->rx_list, skb);
2325 goto splice_read_end;
2326 }
2327
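/* read_sock() support: decrypt records and hand them to @read_actor one at a
 * time without copying into a user buffer. Not usable together with a psock,
 * and only data records are accepted.
 */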
2328 int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
2329 sk_read_actor_t read_actor)
2330 {
2331 struct tls_context *tls_ctx = tls_get_ctx(sk);
2332 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2333 struct tls_prot_info *prot = &tls_ctx->prot_info;
2334 struct strp_msg *rxm = NULL;
2335 struct sk_buff *skb = NULL;
2336 struct sk_psock *psock;
2337 size_t flushed_at = 0;
2338 bool released = true;
2339 struct tls_msg *tlm;
2340 ssize_t copied = 0;
2341 ssize_t decrypted;
2342 int err, used;
2343
2344 psock = sk_psock_get(sk);
2345 if (psock) {
2346 sk_psock_put(sk, psock);
2347 return -EINVAL;
2348 }
2349 err = tls_rx_reader_acquire(sk, ctx, true);
2350 if (err < 0)
2351 return err;
2352
2353 /* If crypto failed the connection is broken */
2354 err = ctx->async_wait.err;
2355 if (err)
2356 goto read_sock_end;
2357
2358 decrypted = 0;
2359 do {
2360 if (!skb_queue_empty(&ctx->rx_list)) {
2361 skb = __skb_dequeue(&ctx->rx_list);
2362 rxm = strp_msg(skb);
2363 tlm = tls_msg(skb);
2364 } else {
2365 struct tls_decrypt_arg darg;
2366
2367 err = tls_rx_rec_wait(sk, NULL, true, released);
2368 if (err <= 0)
2369 goto read_sock_end;
2370
2371 memset(&darg.inargs, 0, sizeof(darg.inargs));
2372
2373 err = tls_rx_one_record(sk, NULL, &darg);
2374 if (err < 0) {
2375 tls_err_abort(sk, -EBADMSG);
2376 goto read_sock_end;
2377 }
2378
2379 released = tls_read_flush_backlog(sk, prot, INT_MAX,
2380 0, decrypted,
2381 &flushed_at);
2382 skb = darg.skb;
2383 rxm = strp_msg(skb);
2384 tlm = tls_msg(skb);
2385 decrypted += rxm->full_len;
2386
2387 tls_rx_rec_done(ctx);
2388 }
2389
2390 /* read_sock does not support reading control messages */
2391 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2392 err = -EINVAL;
2393 goto read_sock_requeue;
2394 }
2395
2396 used = read_actor(desc, skb, rxm->offset, rxm->full_len);
2397 if (used <= 0) {
2398 if (!copied)
2399 err = used;
2400 goto read_sock_requeue;
2401 }
2402 copied += used;
2403 if (used < rxm->full_len) {
2404 rxm->offset += used;
2405 rxm->full_len -= used;
2406 if (!desc->count)
2407 goto read_sock_requeue;
2408 } else {
2409 consume_skb(skb);
2410 if (!desc->count)
2411 skb = NULL;
2412 }
2413 } while (skb);
2414
2415 read_sock_end:
2416 tls_rx_reader_release(sk, ctx);
2417 return copied ? : err;
2418
2419 read_sock_requeue:
2420 __skb_queue_head(&ctx->rx_list, skb);
2421 goto read_sock_end;
2422 }
2423
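/* Readable if the psock ingress queue, the strparser, or rx_list holds data. */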
2424 bool tls_sw_sock_is_readable(struct sock *sk)
2425 {
2426 struct tls_context *tls_ctx = tls_get_ctx(sk);
2427 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2428 bool ingress_empty = true;
2429 struct sk_psock *psock;
2430
2431 rcu_read_lock();
2432 psock = sk_psock(sk);
2433 if (psock)
2434 ingress_empty = list_empty(&psock->ingress_msg);
2435 rcu_read_unlock();
2436
2437 return !ingress_empty || tls_strp_msg_ready(ctx) ||
2438 !skb_queue_empty(&ctx->rx_list);
2439 }
2440
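/* Strparser callback: parse the TLS record header at the current stream
 * offset and return the full record length (header plus payload), 0 if we do
 * not yet have a complete header, or a negative error (which also aborts the
 * strparser) if the header is malformed or the record is too large.
 */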
2441 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
2442 {
2443 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2444 struct tls_prot_info *prot = &tls_ctx->prot_info;
2445 char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
2446 size_t cipher_overhead;
2447 size_t data_len = 0;
2448 int ret;
2449
2450 /* Verify that we have a full TLS header, or wait for more data */
2451 if (strp->stm.offset + prot->prepend_size > skb->len)
2452 return 0;
2453
2454 /* Sanity-check size of on-stack buffer. */
2455 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2456 ret = -EINVAL;
2457 goto read_failure;
2458 }
2459
2460 /* Linearize header to local buffer */
2461 ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
2462 if (ret < 0)
2463 goto read_failure;
2464
2465 strp->mark = header[0];
2466
2467 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2468
2469 cipher_overhead = prot->tag_size;
2470 if (prot->version != TLS_1_3_VERSION &&
2471 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2472 cipher_overhead += prot->iv_size;
2473
2474 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2475 prot->tail_size) {
2476 ret = -EMSGSIZE;
2477 goto read_failure;
2478 }
2479 if (data_len < cipher_overhead) {
2480 ret = -EBADMSG;
2481 goto read_failure;
2482 }
2483
2484 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2485 if (header[1] != TLS_1_2_VERSION_MINOR ||
2486 header[2] != TLS_1_2_VERSION_MAJOR) {
2487 ret = -EINVAL;
2488 goto read_failure;
2489 }
2490
2491 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2492 TCP_SKB_CB(skb)->seq + strp->stm.offset);
2493 return data_len + TLS_HEADER_SIZE;
2494
2495 read_failure:
2496 tls_strp_abort_strp(strp, ret);
2497 return ret;
2498 }
2499
2500 void tls_rx_msg_ready(struct tls_strparser *strp)
2501 {
2502 struct tls_sw_context_rx *ctx;
2503
2504 ctx = container_of(strp, struct tls_sw_context_rx, strp);
2505 ctx->saved_data_ready(strp->sk);
2506 }
2507
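/* sk->sk_data_ready replacement: feed newly arrived TCP data to the
 * strparser (with sk_allocation temporarily set to GFP_ATOMIC), and forward
 * the wakeup when a psock has queued ingress messages.
 */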
2508 static void tls_data_ready(struct sock *sk)
2509 {
2510 struct tls_context *tls_ctx = tls_get_ctx(sk);
2511 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2512 struct sk_psock *psock;
2513 gfp_t alloc_save;
2514
2515 trace_sk_data_ready(sk);
2516
2517 alloc_save = sk->sk_allocation;
2518 sk->sk_allocation = GFP_ATOMIC;
2519 tls_strp_data_ready(&ctx->strp);
2520 sk->sk_allocation = alloc_save;
2521
2522 psock = sk_psock_get(sk);
2523 if (psock) {
2524 if (!list_empty(&psock->ingress_msg))
2525 ctx->saved_data_ready(sk);
2526 sk_psock_put(sk, psock);
2527 }
2528 }
2529
2530 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2531 {
2532 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2533
2534 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2535 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2536 disable_delayed_work_sync(&ctx->tx_work.work);
2537 }
2538
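/* Tear down TX state: wait for in-flight encryption, transmit any records
 * that are ready, then free the partially sent record (if any), all remaining
 * records on tx_list, the open record and the TX AEAD.
 */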
2539 void tls_sw_release_resources_tx(struct sock *sk)
2540 {
2541 struct tls_context *tls_ctx = tls_get_ctx(sk);
2542 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2543 struct tls_rec *rec, *tmp;
2544
2545 /* Wait for any pending async encryptions to complete */
2546 tls_encrypt_async_wait(ctx);
2547
2548 tls_tx_records(sk, -1);
2549
2550 /* Free up unsent records in tx_list. First, free
2551  * the partially sent record, if any, at the head of tx_list.
2552  */
2553 if (tls_ctx->partially_sent_record) {
2554 tls_free_partial_record(sk, tls_ctx);
2555 rec = list_first_entry(&ctx->tx_list,
2556 struct tls_rec, list);
2557 list_del(&rec->list);
2558 sk_msg_free(sk, &rec->msg_plaintext);
2559 kfree(rec);
2560 }
2561
2562 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2563 list_del(&rec->list);
2564 sk_msg_free(sk, &rec->msg_encrypted);
2565 sk_msg_free(sk, &rec->msg_plaintext);
2566 kfree(rec);
2567 }
2568
2569 crypto_free_aead(ctx->aead_send);
2570 tls_free_open_rec(sk);
2571 }
2572
2573 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2574 {
2575 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2576
2577 kfree(ctx);
2578 }
2579
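/* Tear down RX state: purge rx_list, free the RX AEAD, stop the strparser
 * and restore the original sk_data_ready callback if it was replaced.
 */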
2580 void tls_sw_release_resources_rx(struct sock *sk)
2581 {
2582 struct tls_context *tls_ctx = tls_get_ctx(sk);
2583 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2584
2585 if (ctx->aead_recv) {
2586 __skb_queue_purge(&ctx->rx_list);
2587 crypto_free_aead(ctx->aead_recv);
2588 tls_strp_stop(&ctx->strp);
2589 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2590 * we still want to tls_strp_stop(), but sk->sk_data_ready was
2591 * never swapped.
2592 */
2593 if (ctx->saved_data_ready) {
2594 write_lock_bh(&sk->sk_callback_lock);
2595 sk->sk_data_ready = ctx->saved_data_ready;
2596 write_unlock_bh(&sk->sk_callback_lock);
2597 }
2598 }
2599 }
2600
2601 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2602 {
2603 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2604
2605 tls_strp_done(&ctx->strp);
2606 }
2607
2608 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2609 {
2610 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2611
2612 kfree(ctx);
2613 }
2614
2615 void tls_sw_free_resources_rx(struct sock *sk)
2616 {
2617 struct tls_context *tls_ctx = tls_get_ctx(sk);
2618
2619 tls_sw_release_resources_rx(sk);
2620 tls_sw_free_ctx_rx(tls_ctx);
2621 }
2622
2623 /* The work handler to transmit the encrypted records in tx_list */
2624 static void tx_work_handler(struct work_struct *work)
2625 {
2626 struct delayed_work *delayed_work = to_delayed_work(work);
2627 struct tx_work *tx_work = container_of(delayed_work,
2628 struct tx_work, work);
2629 struct sock *sk = tx_work->sk;
2630 struct tls_context *tls_ctx = tls_get_ctx(sk);
2631 struct tls_sw_context_tx *ctx;
2632
2633 if (unlikely(!tls_ctx))
2634 return;
2635
2636 ctx = tls_sw_ctx_tx(tls_ctx);
2637 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2638 return;
2639
2640 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2641 return;
2642
2643 if (mutex_trylock(&tls_ctx->tx_lock)) {
2644 lock_sock(sk);
2645 tls_tx_records(sk, -1);
2646 release_sock(sk);
2647 mutex_unlock(&tls_ctx->tx_lock);
2648 } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
2649 /* Someone is holding the tx_lock, they will likely run Tx
2650 * and cancel the work on their way out of the lock section.
2651 * Schedule a long delay just in case.
2652 */
2653 schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
2654 }
2655 }
2656
2657 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2658 {
2659 struct tls_rec *rec;
2660
2661 rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
2662 if (!rec)
2663 return false;
2664
2665 return READ_ONCE(rec->tx_ready);
2666 }
2667
2668 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2669 {
2670 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2671
2672 /* Schedule the transmission if tx list is ready */
2673 if (tls_is_tx_ready(tx_ctx) &&
2674 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2675 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2676 }
2677
2678 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2679 {
2680 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2681
2682 write_lock_bh(&sk->sk_callback_lock);
2683 rx_ctx->saved_data_ready = sk->sk_data_ready;
2684 sk->sk_data_ready = tls_data_ready;
2685 write_unlock_bh(&sk->sk_callback_lock);
2686 }
2687
2688 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2689 {
2690 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2691
2692 rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2693 tls_ctx->prot_info.version != TLS_1_3_VERSION;
2694 }
2695
2696 static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
2697 {
2698 struct tls_sw_context_tx *sw_ctx_tx;
2699
2700 if (!ctx->priv_ctx_tx) {
2701 sw_ctx_tx = kzalloc_obj(*sw_ctx_tx);
2702 if (!sw_ctx_tx)
2703 return NULL;
2704 } else {
2705 sw_ctx_tx = ctx->priv_ctx_tx;
2706 }
2707
2708 crypto_init_wait(&sw_ctx_tx->async_wait);
2709 atomic_set(&sw_ctx_tx->encrypt_pending, 1);
2710 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2711 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2712 sw_ctx_tx->tx_work.sk = sk;
2713
2714 return sw_ctx_tx;
2715 }
2716
2717 static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
2718 {
2719 struct tls_sw_context_rx *sw_ctx_rx;
2720
2721 if (!ctx->priv_ctx_rx) {
2722 sw_ctx_rx = kzalloc_obj(*sw_ctx_rx);
2723 if (!sw_ctx_rx)
2724 return NULL;
2725 } else {
2726 sw_ctx_rx = ctx->priv_ctx_rx;
2727 }
2728
2729 crypto_init_wait(&sw_ctx_rx->async_wait);
2730 atomic_set(&sw_ctx_rx->decrypt_pending, 1);
2731 init_waitqueue_head(&sw_ctx_rx->wq);
2732 skb_queue_head_init(&sw_ctx_rx->rx_list);
2733 skb_queue_head_init(&sw_ctx_rx->async_hold);
2734
2735 return sw_ctx_rx;
2736 }
2737
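/* Derive the per-connection protocol constants (prepend, tag, tail, AAD, IV,
 * salt and record-sequence sizes) from the negotiated version and cipher,
 * rejecting combinations that would not fit the on-stack buffers.
 */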
2738 int init_prot_info(struct tls_prot_info *prot,
2739 const struct tls_crypto_info *crypto_info,
2740 const struct tls_cipher_desc *cipher_desc)
2741 {
2742 u16 nonce_size = cipher_desc->nonce;
2743
2744 if (crypto_info->version == TLS_1_3_VERSION) {
2745 nonce_size = 0;
2746 prot->aad_size = TLS_HEADER_SIZE;
2747 prot->tail_size = 1;
2748 } else {
2749 prot->aad_size = TLS_AAD_SPACE_SIZE;
2750 prot->tail_size = 0;
2751 }
2752
2753 /* Sanity-check the sizes for stack allocations. */
2754 if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
2755 return -EINVAL;
2756
2757 prot->version = crypto_info->version;
2758 prot->cipher_type = crypto_info->cipher_type;
2759 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2760 prot->tag_size = cipher_desc->tag;
2761 prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
2762 prot->iv_size = cipher_desc->iv;
2763 prot->salt_size = cipher_desc->salt;
2764 prot->rec_seq_size = cipher_desc->rec_seq;
2765
2766 return 0;
2767 }
2768
2769 static void tls_finish_key_update(struct sock *sk, struct tls_context *tls_ctx)
2770 {
2771 struct tls_sw_context_rx *ctx = tls_ctx->priv_ctx_rx;
2772
2773 WRITE_ONCE(ctx->key_update_pending, false);
2774 /* wake up pre-existing poll() */
2775 ctx->saved_data_ready(sk);
2776 }
2777
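/* Set up the software crypto path for one direction (@tx selects TX or RX),
 * or re-key it when @new_crypto_info is given: allocate or reuse the sw
 * context, fill in prot_info, allocate the AEAD and program the key, IV and
 * record sequence, and on initial RX setup initialize the strparser and the
 * async/zero-copy capabilities.
 */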
2778 int tls_set_sw_offload(struct sock *sk, int tx,
2779 struct tls_crypto_info *new_crypto_info)
2780 {
2781 struct tls_crypto_info *crypto_info, *src_crypto_info;
2782 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2783 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2784 const struct tls_cipher_desc *cipher_desc;
2785 char *iv, *rec_seq, *key, *salt;
2786 struct cipher_context *cctx;
2787 struct tls_prot_info *prot;
2788 struct crypto_aead **aead;
2789 struct tls_context *ctx;
2790 struct crypto_tfm *tfm;
2791 int rc = 0;
2792
2793 ctx = tls_get_ctx(sk);
2794 prot = &ctx->prot_info;
2795
2796 /* new_crypto_info != NULL means rekey */
2797 if (!new_crypto_info) {
2798 if (tx) {
2799 ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
2800 if (!ctx->priv_ctx_tx)
2801 return -ENOMEM;
2802 } else {
2803 ctx->priv_ctx_rx = init_ctx_rx(ctx);
2804 if (!ctx->priv_ctx_rx)
2805 return -ENOMEM;
2806 }
2807 }
2808
2809 if (tx) {
2810 sw_ctx_tx = ctx->priv_ctx_tx;
2811 crypto_info = &ctx->crypto_send.info;
2812 cctx = &ctx->tx;
2813 aead = &sw_ctx_tx->aead_send;
2814 } else {
2815 sw_ctx_rx = ctx->priv_ctx_rx;
2816 crypto_info = &ctx->crypto_recv.info;
2817 cctx = &ctx->rx;
2818 aead = &sw_ctx_rx->aead_recv;
2819 }
2820
2821 src_crypto_info = new_crypto_info ?: crypto_info;
2822
2823 cipher_desc = get_cipher_desc(src_crypto_info->cipher_type);
2824 if (!cipher_desc) {
2825 rc = -EINVAL;
2826 goto free_priv;
2827 }
2828
2829 rc = init_prot_info(prot, src_crypto_info, cipher_desc);
2830 if (rc)
2831 goto free_priv;
2832
2833 iv = crypto_info_iv(src_crypto_info, cipher_desc);
2834 key = crypto_info_key(src_crypto_info, cipher_desc);
2835 salt = crypto_info_salt(src_crypto_info, cipher_desc);
2836 rec_seq = crypto_info_rec_seq(src_crypto_info, cipher_desc);
2837
2838 if (!*aead) {
2839 *aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
2840 if (IS_ERR(*aead)) {
2841 rc = PTR_ERR(*aead);
2842 *aead = NULL;
2843 goto free_priv;
2844 }
2845 }
2846
2847 ctx->push_pending_record = tls_sw_push_pending_record;
2848
2849 /* setkey is the last operation that could fail during a
2850  * rekey. If it succeeds, we can start modifying the
2851  * context.
2852  */
2853 rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
2854 if (rc) {
2855 if (new_crypto_info)
2856 goto out;
2857 else
2858 goto free_aead;
2859 }
2860
2861 if (!new_crypto_info) {
2862 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2863 if (rc)
2864 goto free_aead;
2865 }
2866
2867 if (!tx && !new_crypto_info) {
2868 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2869
2870 tls_update_rx_zc_capable(ctx);
2871 sw_ctx_rx->async_capable =
2872 src_crypto_info->version != TLS_1_3_VERSION &&
2873 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2874
2875 rc = tls_strp_init(&sw_ctx_rx->strp, sk);
2876 if (rc)
2877 goto free_aead;
2878 }
2879
2880 memcpy(cctx->iv, salt, cipher_desc->salt);
2881 memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
2882 memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);
2883
2884 if (new_crypto_info) {
2885 unsafe_memcpy(crypto_info, new_crypto_info,
2886 cipher_desc->crypto_info,
2887 /* size was checked in do_tls_setsockopt_conf */);
2888 memzero_explicit(new_crypto_info, cipher_desc->crypto_info);
2889 if (!tx)
2890 tls_finish_key_update(sk, ctx);
2891 }
2892
2893 goto out;
2894
2895 free_aead:
2896 crypto_free_aead(*aead);
2897 *aead = NULL;
2898 free_priv:
2899 if (!new_crypto_info) {
2900 if (tx) {
2901 kfree(ctx->priv_ctx_tx);
2902 ctx->priv_ctx_tx = NULL;
2903 } else {
2904 kfree(ctx->priv_ctx_rx);
2905 ctx->priv_ctx_rx = NULL;
2906 }
2907 }
2908 out:
2909 return rc;
2910 }
2911