/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>

#include "tls.h"

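/* Per-record decrypt parameters. struct_group() wraps the input flags so
 * that, presumably, they can be copied or cleared as one unit without
 * touching the output skb pointer below.
 */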
struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	bool async_done;
	u8 tail;
	);

	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[TLS_MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	bool free_sgout;
	struct scatterlist sg[];
};

noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

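/* TLS 1.3 hides the real record type: the plaintext is laid out as
 * content || content-type byte || zero padding (RFC 8446). To recover the
 * type, scan backwards from the tail past any zero bytes; the first
 * non-zero byte is the content type, and the zeros skipped are padding.
 */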
static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			offset--;
		}
		tlm->control = content_type;
	}
	return sub;
}

static void tls_decrypt_done(void *data, int err)
{
	struct aead_request *aead_req = data;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	/* If requests get too backlogged crypto API returns -EBUSY and calls
	 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
	 * to make waiting for backlog to flush with crypto_wait_req() easier.
	 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
	 * -EINPROGRESS -> 0.
	 * We have a single struct crypto_async_request per direction, this
	 * scheme doesn't help us, so just ignore the first ->complete().
	 */
	if (err == -EINPROGRESS)
		return;

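	/* The decrypt ctx sits right behind the request in the single
	 * allocation made by tls_decrypt_sg(); recompute its address the
	 * same way it was laid out there.
	 */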
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate the error, if any */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if the skb was not decrypted in place */
	if (dctx->free_sgout) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
}

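/* decrypt_pending is biased by one so that in-flight completions can never
 * drop it to zero on their own. The waiter removes the bias, sleeps until
 * the last callback calls complete(), then restores the bias for the next
 * round of requests.
 */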
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);

	__skb_queue_purge(&ctx->async_hold);
	return ctx->async_wait.err;
}

static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		DECLARE_CRYPTO_WAIT(wait);

		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		ret = crypto_aead_decrypt(aead_req);
		if (ret == -EINPROGRESS || ret == -EBUSY)
			ret = crypto_wait_req(ret, &wait);
		return ret;
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS)
		return 0;

	if (ret == -EBUSY) {
		ret = tls_decrypt_async_wait(ctx);
		darg->async_done = true;
		/* all completions have run, we're not doing async anymore */
		darg->async = false;
		return ret;
	}

	atomic_dec(&ctx->decrypt_pending);
	darg->async = false;

	return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from the encrypted sg
	 * at the end of the plaintext sg. The caller ensures that msg_en
	 * has enough room for them.
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * the same offset for both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

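	/* Both AEAD scatterlists have two entries: entry 0 carries the AAD,
	 * entry 1 is left unterminated so tls_push_record() can chain the
	 * sk_msg page fragments behind it.
	 */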
	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	rec->sk = sk;

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, rc);

	return rc;
}

static void tls_encrypt_done(void *data, int err)
{
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct tls_rec *rec = data;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct sock *sk;

	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
		return;

	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if an error was previously set on the socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If an error is already set on the socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec) {
			/* Schedule the transmission */
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
}

static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->encrypt_pending);

	return ctx->async_wait.err;
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

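	/* Build the per-record nonce: the static salt+IV from the key
	 * material, mixed with the record sequence number.
	 * tls_xor_iv_with_seq() applies whatever combination the
	 * negotiated version and cipher call for.
	 */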
	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (rc == -EBUSY) {
		rc = tls_encrypt_async_wait(ctx);
		rc = rc ?: -EINPROGRESS;
		/*
		 * The async callback tls_encrypt_done() has already
		 * decremented encrypt_pending and restored the sge on
		 * both success and error. Skip the synchronous cleanup
		 * below on error, just remove the record and return.
		 */
		if (rc != -EINPROGRESS) {
			list_del(&rec->list);
			return rc;
		}
	}
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context; encryption either succeeded
	 * or is still in flight.
	 */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

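/* Split the open record at @split_point, moving the tail of the plaintext
 * into a freshly allocated record. Used when BPF apply_bytes covers only
 * part of the record, or when the encrypted buffer came up short of the
 * full plaintext plus overhead.
 */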
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	/* msg_pl->sg.data is a ring; data[MAX+1] is reserved for the wrap
	 * link (frags won't use it). 'i' is now the last filled entry:
	 *
	 *              i   end                  start
	 *              v    v                     v         [ rsv ]
	 * [ d ][ d ][    ][    ]  ...  [    ][ d ][ d ][ d ][chain]
	 *  ^ END                                                v
	 *  `----------------------------------------------------'
	 *
	 * Note that SGL does not allow chain-after-chain, so for TLS 1.3,
	 * we must make sure we don't create the wrap entry and then chain
	 * link to content_type immediately at index 0.
	 */
	if (i < msg_pl->sg.start)
		sg_chain(msg_pl->sg.data, ARRAY_SIZE(msg_pl->sg.data),
			 msg_pl->sg.data);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, i + 2, &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;

		if ((s32)delta > 0) {
			/* It indicates that we executed bpf_msg_pop_data(),
			 * causing the plaintext data size to decrease.
			 * Therefore the encrypted data size also needs to
			 * correspondingly decrease. We only need to subtract
			 * delta to calculate the new ciphertext length since
			 * ktls does not support block encryption.
			 */
			struct sk_msg *enc = &ctx->open_rec->msg_encrypted;

			sk_msg_trim(sk, enc, enc->sg.size - delta);
		}
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			/* Regardless of whether the data represented by
			 * msg_redir is sent successfully, we have already
			 * uncharged it via sk_msg_return_zero(). The
			 * msg->sg.size represents the remaining unprocessed
			 * data, which needs to be uncharged here.
			 */
			sk_mem_uncharge(sk, msg->sg.size);
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

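/* MSG_SPLICE_PAGES path: take page references straight out of the iterator
 * and splice them into the plaintext message rather than copying the data.
 */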
static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
				 struct sk_msg *msg_pl, size_t try_to_copy,
				 ssize_t *copied)
{
	struct page *page = NULL, **pages = &page;

	do {
		ssize_t part;
		size_t off;

		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
		if (part <= 0)
			return part ?: -EIO;

		if (WARN_ON_ONCE(!sendpage_ok(page))) {
			iov_iter_revert(&msg->msg_iter, part);
			return -EIO;
		}

		sk_msg_page_add(msg_pl, page, part, off);
		msg_pl->sg.copybreak = 0;
		msg_pl->sg.curr = msg_pl->sg.end;
		sk_mem_charge(sk, part);
		*copied += part;
		try_to_copy -= part;
	} while (try_to_copy && !sk_msg_full(msg_pl));

	return 0;
}

static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
				 size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (!eor && (msg->msg_flags & MSG_EOR))
		return -EINVAL;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = tls_ctx->tx_max_payload_len - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;

			if (sk_msg_full(msg_pl)) {
				full_record = true;
				sk_msg_trim(sk, msg_en,
					    msg_pl->sg.size +
					    prot->overhead_size);
			}

			if (full_record || eor)
				goto copied;
			continue;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC) {
					if (msg_pl->cork_bytes) {
						ret = 0;
						goto send_end;
					}
					goto rollback_iter;
				} else if (ret != -EAGAIN)
					goto send_end;
			}

			/* Transmit if any encryptions have completed */
			if (test_and_clear_bit(BIT_TX_SCHEDULED,
					       &ctx->tx_bitmask)) {
				cancel_delayed_work(&ctx->tx_work.work);
				tls_tx_records(sk, msg->msg_flags);
			}

			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* An open record is only marked pending if the copy
		 * succeeded; otherwise we would trim the sg but not reset
		 * the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
copied:
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}

			/* Transmit if any encryptions have completed */
			if (test_and_clear_bit(BIT_TX_SCHEDULED,
					       &ctx->tx_bitmask)) {
				cancel_delayed_work(&ctx->tx_work.work);
				tls_tx_records(sk, msg->msg_flags);
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

send_end:
	if (!num_async) {
		goto end;
	} else if (num_zc || eor) {
		int err;

		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
	return copied > 0 ? copied : ret;
}

int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
			       MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_sendmsg_locked(sk, msg, size);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;

	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* same checks as in tls_sw_push_pending_record() */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;
	if (msg_pl->sg.size == 0)
		goto unlock;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	if (tls_encrypt_async_wait(ctx))
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	/* a rekey is pending, let userspace deal with it */
	if (unlikely(ctx->key_update_pending))
		return -EKEYEXPIRED;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
		return tls_rx_rec_wait(sk, psock, nonblock, false);

	return 1;
}

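/* Map up to @length bytes of @from into the @to scatterlist by taking page
 * references rather than copying. No socket memory is charged here.
 */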
static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}

static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}

/* Decrypt handlers
 *
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 *    zc | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */

/* Decrypt the input skb into out_iov, into out_sg, or into freshly
 * allocated skb buffers. On input, 'darg->zc' indicates whether zero-copy
 * mode should be tried; in zero-copy mode either out_iov or out_sg must be
 * non-NULL. If both are NULL, the record is decrypted into skb buffers
 * instead, i.e. zero-copy is disabled and 'darg->zc' is updated to reflect
 * that.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}
	dctx->free_sgout = !!pages;

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err) {
		if (darg->async_done)
			goto exit_free_skb;
		goto exit_free_pages;
	}

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err) {
			err = tls_decrypt_async_wait(ctx);
			darg->async = false;
		}
		return err;
	}

	if (unlikely(darg->async_done))
		return 0;

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}

static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}

static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}

static int tls_check_pending_rekey(struct sock *sk, struct tls_context *ctx,
				   struct sk_buff *skb)
{
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	char hs_type;
	int err;

	if (likely(tlm->control != TLS_RECORD_TYPE_HANDSHAKE))
		return 0;

	if (rxm->full_len < 1)
		return 0;

	err = skb_copy_bits(skb, rxm->offset, &hs_type, 1);
	if (err < 0) {
		DEBUG_NET_WARN_ON_ONCE(1);
		return err;
	}

	if (hs_type == TLS_HANDSHAKE_KEYUPDATE) {
		struct tls_sw_context_rx *rx_ctx = ctx->priv_ctx_rx;

		WRITE_ONCE(rx_ctx->key_update_pending, true);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYRECEIVED);
	}

	return 0;
}

static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return tls_check_pending_rekey(sk, tls_ctx, darg->skb);
}

int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}

/* All records returned from a recvmsg() call must have the same type.
 * 0 is not a valid content type. Use it as "no type reported, yet".
 */
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}

static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}

/* This function traverses the rx_list in the TLS receive context and copies
 * the decrypted records into the buffer provided by the caller when
 * zero-copy is not used. Further, records are removed from the rx_list if
 * this is not a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek,
			   bool *more)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto more;

		len = len - chunk;
		copied = copied + chunk;

1934 /* Consume the data from the record in the non-peek case */
1935 if (!is_peek) {
1936 rxm->offset = rxm->offset + chunk;
1937 rxm->full_len = rxm->full_len - chunk;
1938
1939 /* Return if there is unconsumed data in the record */
1940 if (rxm->full_len - skip)
1941 break;
1942 }
1943
1944 /* The remaining skip-bytes must lie in 1st record in rx_list.
1945 * So from the 2nd record, 'skip' should be 0.
1946 */
1947 skip = 0;
1948
1949 if (msg)
1950 msg->msg_flags |= MSG_EOR;
1951
1952 next_skb = skb_peek_next(skb, &ctx->rx_list);
1953
1954 if (!is_peek) {
1955 __skb_unlink(skb, &ctx->rx_list);
1956 consume_skb(skb);
1957 }
1958
1959 skb = next_skb;
1960 }
1961 err = 0;
1962
1963 out:
1964 return copied ? : err;
1965 more:
1966 if (more)
1967 *more = true;
1968 goto out;
1969 }
1970
1971 static bool
1972 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1973 size_t len_left, size_t decrypted, ssize_t done,
1974 size_t *flushed_at)
1975 {
1976 size_t max_rec;
1977
1978 if (len_left <= decrypted)
1979 return false;
1980
1981 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1982 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
1983 return false;
1984
1985 *flushed_at = done;
1986 return sk_flush_backlog(sk);
1987 }
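/* Editorial note on the thresholds above, assuming TLS 1.2 with AES-GCM-128
 * for the arithmetic: max_rec = prepend (5-byte header + 8-byte explicit
 * nonce) + 16-byte tag - 0-byte tail + TLS_MAX_PAYLOAD_SIZE (16384)
 * = 16413 bytes. The flush is skipped while less than SZ_128K has been
 * returned since the last flush and more than one maximal record is still
 * queued in the TCP socket; otherwise the backlog is processed so softirq
 * can refill the receive queue.
 */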
1988
1989 static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
1990 bool nonblock)
1991 {
1992 long timeo;
1993 int ret;
1994
1995 timeo = sock_rcvtimeo(sk, nonblock);
1996
1997 while (unlikely(ctx->reader_present)) {
1998 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1999
2000 ctx->reader_contended = 1;
2001
2002 add_wait_queue(&ctx->wq, &wait);
2003 ret = sk_wait_event(sk, &timeo,
2004 !READ_ONCE(ctx->reader_present), &wait);
2005 remove_wait_queue(&ctx->wq, &wait);
2006
2007 if (timeo <= 0)
2008 return -EAGAIN;
2009 if (signal_pending(current))
2010 return sock_intr_errno(timeo);
2011 if (ret < 0)
2012 return ret;
2013 }
2014
2015 WRITE_ONCE(ctx->reader_present, 1);
2016
2017 return 0;
2018 }
2019
2020 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
2021 bool nonblock)
2022 {
2023 int err;
2024
2025 lock_sock(sk);
2026 err = tls_rx_reader_acquire(sk, ctx, nonblock);
2027 if (err)
2028 release_sock(sk);
2029 return err;
2030 }
2031
2032 static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
2033 {
2034 if (unlikely(ctx->reader_contended)) {
2035 if (wq_has_sleeper(&ctx->wq))
2036 wake_up(&ctx->wq);
2037 else
2038 ctx->reader_contended = 0;
2039
2040 WARN_ON_ONCE(!ctx->reader_present);
2041 }
2042
2043 WRITE_ONCE(ctx->reader_present, 0);
2044 }
2045
2046 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
2047 {
2048 tls_rx_reader_release(sk, ctx);
2049 release_sock(sk);
2050 }
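/* Editorial sketch of how the four helpers above pair up (illustrative
 * only): tls_sw_recvmsg() and tls_sw_splice_read() use the lock/unlock
 * variants, which take and drop the socket lock around the reader claim:
 *
 *	err = tls_rx_reader_lock(sk, ctx, nonblock);
 *	if (err < 0)
 *		return err;
 *	... read and decrypt records ...
 *	tls_rx_reader_unlock(sk, ctx);
 *
 * tls_sw_read_sock() calls tls_rx_reader_acquire()/release() directly,
 * since its callers are expected to already hold the socket lock.
 */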
2051
2052 int tls_sw_recvmsg(struct sock *sk,
2053 struct msghdr *msg,
2054 size_t len,
2055 int flags)
2056 {
2057 struct tls_context *tls_ctx = tls_get_ctx(sk);
2058 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2059 struct tls_prot_info *prot = &tls_ctx->prot_info;
2060 ssize_t decrypted = 0, async_copy_bytes = 0;
2061 struct sk_psock *psock;
2062 unsigned char control = 0;
2063 size_t flushed_at = 0;
2064 struct strp_msg *rxm;
2065 struct tls_msg *tlm;
2066 ssize_t copied = 0;
2067 ssize_t peeked = 0;
2068 bool async = false;
2069 int target, err;
2070 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
2071 bool is_peek = flags & MSG_PEEK;
2072 bool rx_more = false;
2073 bool released = true;
2074 bool bpf_strp_enabled;
2075 bool zc_capable;
2076
2077 if (unlikely(flags & MSG_ERRQUEUE))
2078 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
2079
2080 err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
2081 if (err < 0)
2082 return err;
2083 psock = sk_psock_get(sk);
2084 bpf_strp_enabled = sk_psock_strp_enabled(psock);
2085
2086 /* If crypto failed the connection is broken */
2087 err = ctx->async_wait.err;
2088 if (err)
2089 goto end;
2090
2091 /* Process pending decrypted records; these must be copied, not zero-copied */
2092 err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
2093 if (err < 0)
2094 goto end;
2095
2096 /* process_rx_list() will set @control if it processed any records */
2097 copied = err;
2098 if (len <= copied || rx_more ||
2099 (control && control != TLS_RECORD_TYPE_DATA))
2100 goto end;
2101
2102 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2103 len = len - copied;
2104
2105 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
2106 ctx->zc_capable;
2107 decrypted = 0;
2108 while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
2109 struct tls_decrypt_arg darg;
2110 int to_decrypt, chunk;
2111
2112 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
2113 released);
2114 if (err <= 0) {
2115 if (psock) {
2116 chunk = sk_msg_recvmsg(sk, psock, msg, len,
2117 flags);
2118 if (chunk > 0) {
2119 decrypted += chunk;
2120 len -= chunk;
2121 continue;
2122 }
2123 }
2124 goto recv_end;
2125 }
2126
2127 memset(&darg.inargs, 0, sizeof(darg.inargs));
2128
2129 rxm = strp_msg(tls_strp_msg(ctx));
2130 tlm = tls_msg(tls_strp_msg(ctx));
2131
2132 to_decrypt = rxm->full_len - prot->overhead_size;
2133
2134 if (zc_capable && to_decrypt <= len &&
2135 tlm->control == TLS_RECORD_TYPE_DATA)
2136 darg.zc = true;
2137
2138 /* Do not use async mode if record is non-data */
2139 if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
2140 darg.async = ctx->async_capable;
2141 else
2142 darg.async = false;
2143
2144 err = tls_rx_one_record(sk, msg, &darg);
2145 if (err < 0) {
2146 tls_err_abort(sk, -EBADMSG);
2147 goto recv_end;
2148 }
2149
2150 async |= darg.async;
2151
2152 /* If the type of records being processed is not known yet,
2153 * set it to the record type just dequeued. If it is already
2154 * known but does not match the record type just dequeued, stop
2155 * processing. We always have the record type here: for TLS 1.2
2156 * it is known as soon as the record is dequeued from the stream
2157 * parser, and for TLS 1.3 async decrypt is disabled.
2158 */
2159 err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2160 if (err <= 0) {
2161 DEBUG_NET_WARN_ON_ONCE(darg.zc);
2162 tls_rx_rec_done(ctx);
2163 put_on_rx_list_err:
2164 __skb_queue_tail(&ctx->rx_list, darg.skb);
2165 goto recv_end;
2166 }
2167
2168 /* periodically flush backlog, and feed strparser */
2169 released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
2170 decrypted + copied,
2171 &flushed_at);
2172
2173 /* TLS 1.3 may have updated the length by more than overhead */
2174 rxm = strp_msg(darg.skb);
2175 chunk = rxm->full_len;
2176 tls_rx_rec_done(ctx);
2177
2178 if (!darg.zc) {
2179 bool partially_consumed = chunk > len;
2180 struct sk_buff *skb = darg.skb;
2181
2182 DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2183
2184 if (async) {
2185 /* TLS 1.2-only, to_decrypt must be text len */
2186 chunk = min_t(int, to_decrypt, len);
2187 async_copy_bytes += chunk;
2188 put_on_rx_list:
2189 decrypted += chunk;
2190 len -= chunk;
2191 __skb_queue_tail(&ctx->rx_list, skb);
2192 if (unlikely(control != TLS_RECORD_TYPE_DATA))
2193 break;
2194 continue;
2195 }
2196
2197 if (bpf_strp_enabled) {
2198 released = true;
2199 err = sk_psock_tls_strp_read(psock, skb);
2200 if (err != __SK_PASS) {
2201 rxm->offset = rxm->offset + rxm->full_len;
2202 rxm->full_len = 0;
2203 if (err == __SK_DROP)
2204 consume_skb(skb);
2205 continue;
2206 }
2207 }
2208
2209 if (partially_consumed)
2210 chunk = len;
2211
2212 err = skb_copy_datagram_msg(skb, rxm->offset,
2213 msg, chunk);
2214 if (err < 0)
2215 goto put_on_rx_list_err;
2216
2217 if (is_peek) {
2218 peeked += chunk;
2219 goto put_on_rx_list;
2220 }
2221
2222 if (partially_consumed) {
2223 rxm->offset += chunk;
2224 rxm->full_len -= chunk;
2225 goto put_on_rx_list;
2226 }
2227
2228 consume_skb(skb);
2229 }
2230
2231 decrypted += chunk;
2232 len -= chunk;
2233
2234 /* Return full control message to userspace before trying
2235 * to parse another message type
2236 */
2237 msg->msg_flags |= MSG_EOR;
2238 if (control != TLS_RECORD_TYPE_DATA)
2239 break;
2240 }
2241
2242 recv_end:
2243 if (async) {
2244 int ret;
2245
2246 /* Wait for all previously submitted records to be decrypted */
2247 ret = tls_decrypt_async_wait(ctx);
2248
2249 if (ret) {
2250 if (err >= 0 || err == -EINPROGRESS)
2251 err = ret;
2252 goto end;
2253 }
2254
2255 /* Drain records from the rx_list & copy if required */
2256 if (is_peek)
2257 err = process_rx_list(ctx, msg, &control, copied + peeked,
2258 decrypted - peeked, is_peek, NULL);
2259 else
2260 err = process_rx_list(ctx, msg, &control, 0,
2261 async_copy_bytes, is_peek, NULL);
2262
2263 /* we could have copied less than we wanted, and possibly nothing */
2264 decrypted += max(err, 0) - async_copy_bytes;
2265 }
2266
2267 copied += decrypted;
2268
2269 end:
2270 tls_rx_reader_unlock(sk, ctx);
2271 if (psock)
2272 sk_psock_put(sk, psock);
2273 return copied ? : err;
2274 }
2275
2276 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
2277 struct pipe_inode_info *pipe,
2278 size_t len, unsigned int flags)
2279 {
2280 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2281 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2282 struct strp_msg *rxm = NULL;
2283 struct sock *sk = sock->sk;
2284 struct tls_msg *tlm;
2285 struct sk_buff *skb;
2286 ssize_t copied = 0;
2287 int chunk;
2288 int err;
2289
2290 err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
2291 if (err < 0)
2292 return err;
2293
2294 if (!skb_queue_empty(&ctx->rx_list)) {
2295 skb = __skb_dequeue(&ctx->rx_list);
2296 } else {
2297 struct tls_decrypt_arg darg;
2298
2299 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
2300 true);
2301 if (err <= 0)
2302 goto splice_read_end;
2303
2304 memset(&darg.inargs, 0, sizeof(darg.inargs));
2305
2306 err = tls_rx_one_record(sk, NULL, &darg);
2307 if (err < 0) {
2308 tls_err_abort(sk, -EBADMSG);
2309 goto splice_read_end;
2310 }
2311
2312 tls_rx_rec_done(ctx);
2313 skb = darg.skb;
2314 }
2315
2316 rxm = strp_msg(skb);
2317 tlm = tls_msg(skb);
2318
2319 /* splice does not support reading control messages */
2320 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2321 err = -EINVAL;
2322 goto splice_requeue;
2323 }
2324
2325 chunk = min_t(unsigned int, rxm->full_len, len);
2326 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2327 if (copied < 0)
2328 goto splice_requeue;
2329
2330 if (copied < rxm->full_len) {
2331 rxm->offset += copied;
2332 rxm->full_len -= copied;
2333 goto splice_requeue;
2334 }
2335
2336 consume_skb(skb);
2337
2338 splice_read_end:
2339 tls_rx_reader_unlock(sk, ctx);
2340 return copied ? : err;
2341
2342 splice_requeue:
2343 __skb_queue_head(&ctx->rx_list, skb);
2344 goto splice_read_end;
2345 }
2346
2347 int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
2348 sk_read_actor_t read_actor)
2349 {
2350 struct tls_context *tls_ctx = tls_get_ctx(sk);
2351 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2352 struct tls_prot_info *prot = &tls_ctx->prot_info;
2353 struct strp_msg *rxm = NULL;
2354 struct sk_buff *skb = NULL;
2355 struct sk_psock *psock;
2356 size_t flushed_at = 0;
2357 bool released = true;
2358 struct tls_msg *tlm;
2359 ssize_t copied = 0;
2360 ssize_t decrypted;
2361 int err, used;
2362
2363 psock = sk_psock_get(sk);
2364 if (psock) {
2365 sk_psock_put(sk, psock);
2366 return -EINVAL;
2367 }
2368 err = tls_rx_reader_acquire(sk, ctx, true);
2369 if (err < 0)
2370 return err;
2371
2372 /* If crypto failed the connection is broken */
2373 err = ctx->async_wait.err;
2374 if (err)
2375 goto read_sock_end;
2376
2377 decrypted = 0;
2378 do {
2379 if (!skb_queue_empty(&ctx->rx_list)) {
2380 skb = __skb_dequeue(&ctx->rx_list);
2381 rxm = strp_msg(skb);
2382 tlm = tls_msg(skb);
2383 } else {
2384 struct tls_decrypt_arg darg;
2385
2386 err = tls_rx_rec_wait(sk, NULL, true, released);
2387 if (err <= 0)
2388 goto read_sock_end;
2389
2390 memset(&darg.inargs, 0, sizeof(darg.inargs));
2391
2392 err = tls_rx_one_record(sk, NULL, &darg);
2393 if (err < 0) {
2394 tls_err_abort(sk, -EBADMSG);
2395 goto read_sock_end;
2396 }
2397
2398 released = tls_read_flush_backlog(sk, prot, INT_MAX,
2399 0, decrypted,
2400 &flushed_at);
2401 skb = darg.skb;
2402 rxm = strp_msg(skb);
2403 tlm = tls_msg(skb);
2404 decrypted += rxm->full_len;
2405
2406 tls_rx_rec_done(ctx);
2407 }
2408
2409 /* read_sock does not support reading control messages */
2410 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2411 err = -EINVAL;
2412 goto read_sock_requeue;
2413 }
2414
2415 used = read_actor(desc, skb, rxm->offset, rxm->full_len);
2416 if (used <= 0) {
2417 if (!copied)
2418 err = used;
2419 goto read_sock_requeue;
2420 }
2421 copied += used;
2422 if (used < rxm->full_len) {
2423 rxm->offset += used;
2424 rxm->full_len -= used;
2425 if (!desc->count)
2426 goto read_sock_requeue;
2427 } else {
2428 consume_skb(skb);
2429 if (!desc->count)
2430 skb = NULL;
2431 }
2432 } while (skb);
2433
2434 read_sock_end:
2435 tls_rx_reader_release(sk, ctx);
2436 return copied ? : err;
2437
2438 read_sock_requeue:
2439 __skb_queue_head(&ctx->rx_list, skb);
2440 goto read_sock_end;
2441 }
2442
2443 bool tls_sw_sock_is_readable(struct sock *sk)
2444 {
2445 struct tls_context *tls_ctx = tls_get_ctx(sk);
2446 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2447 bool ingress_empty = true;
2448 struct sk_psock *psock;
2449
2450 rcu_read_lock();
2451 psock = sk_psock(sk);
2452 if (psock)
2453 ingress_empty = list_empty(&psock->ingress_msg);
2454 rcu_read_unlock();
2455
2456 return !ingress_empty || tls_strp_msg_ready(ctx) ||
2457 !skb_queue_empty(&ctx->rx_list);
2458 }
2459
2460 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
2461 {
2462 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2463 struct tls_prot_info *prot = &tls_ctx->prot_info;
2464 char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
2465 size_t cipher_overhead;
2466 size_t data_len = 0;
2467 int ret;
2468
2469 /* Verify that we have a full TLS header, or wait for more data */
2470 if (strp->stm.offset + prot->prepend_size > skb->len)
2471 return 0;
2472
2473 /* Sanity-check size of on-stack buffer. */
2474 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2475 ret = -EINVAL;
2476 goto read_failure;
2477 }
2478
2479 /* Linearize header to local buffer */
2480 ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
2481 if (ret < 0)
2482 goto read_failure;
2483
2484 strp->mark = header[0];
2485
2486 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2487
2488 cipher_overhead = prot->tag_size;
2489 if (prot->version != TLS_1_3_VERSION &&
2490 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2491 cipher_overhead += prot->iv_size;
2492
2493 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2494 prot->tail_size) {
2495 ret = -EMSGSIZE;
2496 goto read_failure;
2497 }
2498 if (data_len < cipher_overhead) {
2499 ret = -EBADMSG;
2500 goto read_failure;
2501 }
2502
2503 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2504 if (header[1] != TLS_1_2_VERSION_MINOR ||
2505 header[2] != TLS_1_2_VERSION_MAJOR) {
2506 ret = -EINVAL;
2507 goto read_failure;
2508 }
2509
2510 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2511 TCP_SKB_CB(skb)->seq + strp->stm.offset);
2512 return data_len + TLS_HEADER_SIZE;
2513
2514 read_failure:
2515 tls_strp_abort_strp(strp, ret);
2516 return ret;
2517 }
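/* Editorial note: a worked example of the header parse above. The 5-byte
 * TLS record header on the wire is [type, 0x03, 0x03, len_hi, len_lo], so
 * the bytes 17 03 03 00 30 describe an application-data record (0x17)
 * carrying data_len = 0x0030 = 48 bytes of ciphertext; tls_rx_msg_size()
 * would return 48 + TLS_HEADER_SIZE = 53 as the full record length for
 * the strparser.
 */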
2518
2519 void tls_rx_msg_ready(struct tls_strparser *strp)
2520 {
2521 struct tls_sw_context_rx *ctx;
2522
2523 ctx = container_of(strp, struct tls_sw_context_rx, strp);
2524 ctx->saved_data_ready(strp->sk);
2525 }
2526
2527 static void tls_data_ready(struct sock *sk)
2528 {
2529 struct tls_context *tls_ctx = tls_get_ctx(sk);
2530 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2531 struct sk_psock *psock;
2532 gfp_t alloc_save;
2533
2534 trace_sk_data_ready(sk);
2535
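/* sk_data_ready may run in BH/softirq context, so force the strparser's
 * allocations to be atomic for the duration of the callback.
 */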
2536 alloc_save = sk->sk_allocation;
2537 sk->sk_allocation = GFP_ATOMIC;
2538 tls_strp_data_ready(&ctx->strp);
2539 sk->sk_allocation = alloc_save;
2540
2541 psock = sk_psock_get(sk);
2542 if (psock) {
2543 if (!list_empty(&psock->ingress_msg))
2544 ctx->saved_data_ready(sk);
2545 sk_psock_put(sk, psock);
2546 }
2547 }
2548
2549 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2550 {
2551 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2552
2553 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2554 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2555 disable_delayed_work_sync(&ctx->tx_work.work);
2556 }
2557
2558 void tls_sw_release_resources_tx(struct sock *sk)
2559 {
2560 struct tls_context *tls_ctx = tls_get_ctx(sk);
2561 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2562 struct tls_rec *rec, *tmp;
2563
2564 /* Wait for any pending async encryptions to complete */
2565 tls_encrypt_async_wait(ctx);
2566
2567 tls_tx_records(sk, -1);
2568
2569 /* Free up unsent records in tx_list. First free the partially
2570 * sent record, if any, at the head of tx_list.
2571 */
2572 if (tls_ctx->partially_sent_record) {
2573 tls_free_partial_record(sk, tls_ctx);
2574 rec = list_first_entry(&ctx->tx_list,
2575 struct tls_rec, list);
2576 list_del(&rec->list);
2577 sk_msg_free(sk, &rec->msg_plaintext);
2578 kfree(rec);
2579 }
2580
2581 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2582 list_del(&rec->list);
2583 sk_msg_free(sk, &rec->msg_encrypted);
2584 sk_msg_free(sk, &rec->msg_plaintext);
2585 kfree(rec);
2586 }
2587
2588 crypto_free_aead(ctx->aead_send);
2589 tls_free_open_rec(sk);
2590 }
2591
2592 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2593 {
2594 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2595
2596 kfree(ctx);
2597 }
2598
2599 void tls_sw_release_resources_rx(struct sock *sk)
2600 {
2601 struct tls_context *tls_ctx = tls_get_ctx(sk);
2602 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2603
2604 if (ctx->aead_recv) {
2605 __skb_queue_purge(&ctx->rx_list);
2606 crypto_free_aead(ctx->aead_recv);
2607 tls_strp_stop(&ctx->strp);
2608 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2609 * we still want to tls_strp_stop(), but sk->sk_data_ready was
2610 * never swapped.
2611 */
2612 if (ctx->saved_data_ready) {
2613 write_lock_bh(&sk->sk_callback_lock);
2614 sk->sk_data_ready = ctx->saved_data_ready;
2615 write_unlock_bh(&sk->sk_callback_lock);
2616 }
2617 }
2618 }
2619
2620 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2621 {
2622 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2623
2624 tls_strp_done(&ctx->strp);
2625 }
2626
2627 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2628 {
2629 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2630
2631 kfree(ctx);
2632 }
2633
2634 void tls_sw_free_resources_rx(struct sock *sk)
2635 {
2636 struct tls_context *tls_ctx = tls_get_ctx(sk);
2637 struct tls_sw_context_rx *ctx;
2638
2639 ctx = tls_sw_ctx_rx(tls_ctx);
2640
2641 tls_sw_release_resources_rx(sk);
2642 __tls_strp_done(&ctx->strp);
2643 tls_sw_free_ctx_rx(tls_ctx);
2644 }
2645
2646 /* The work handler to transmit the encrypted records in tx_list */
2647 static void tx_work_handler(struct work_struct *work)
2648 {
2649 struct delayed_work *delayed_work = to_delayed_work(work);
2650 struct tx_work *tx_work = container_of(delayed_work,
2651 struct tx_work, work);
2652 struct sock *sk = tx_work->sk;
2653 struct tls_context *tls_ctx = tls_get_ctx(sk);
2654 struct tls_sw_context_tx *ctx;
2655
2656 if (unlikely(!tls_ctx))
2657 return;
2658
2659 ctx = tls_sw_ctx_tx(tls_ctx);
2660 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2661 return;
2662
2663 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2664 return;
2665
2666 if (mutex_trylock(&tls_ctx->tx_lock)) {
2667 lock_sock(sk);
2668 tls_tx_records(sk, -1);
2669 release_sock(sk);
2670 mutex_unlock(&tls_ctx->tx_lock);
2671 } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
2672 /* Someone is holding the tx_lock, they will likely run Tx
2673 * and cancel the work on their way out of the lock section.
2674 * Schedule a long delay just in case.
2675 */
2676 schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
2677 }
2678 }
2679
2680 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2681 {
2682 struct tls_rec *rec;
2683
2684 rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
2685 if (!rec)
2686 return false;
2687
2688 return READ_ONCE(rec->tx_ready);
2689 }
2690
2691 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2692 {
2693 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2694
2695 /* Schedule the transmission if tx list is ready */
2696 if (tls_is_tx_ready(tx_ctx) &&
2697 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2698 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2699 }
2700
2701 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2702 {
2703 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2704
2705 write_lock_bh(&sk->sk_callback_lock);
2706 rx_ctx->saved_data_ready = sk->sk_data_ready;
2707 sk->sk_data_ready = tls_data_ready;
2708 write_unlock_bh(&sk->sk_callback_lock);
2709 }
2710
2711 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2712 {
2713 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2714
2715 rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2716 tls_ctx->prot_info.version != TLS_1_3_VERSION;
2717 }
2718
2719 static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
2720 {
2721 struct tls_sw_context_tx *sw_ctx_tx;
2722
2723 if (!ctx->priv_ctx_tx) {
2724 sw_ctx_tx = kzalloc_obj(*sw_ctx_tx);
2725 if (!sw_ctx_tx)
2726 return NULL;
2727 } else {
2728 sw_ctx_tx = ctx->priv_ctx_tx;
2729 }
2730
2731 crypto_init_wait(&sw_ctx_tx->async_wait);
2732 atomic_set(&sw_ctx_tx->encrypt_pending, 1);
2733 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2734 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2735 sw_ctx_tx->tx_work.sk = sk;
2736
2737 return sw_ctx_tx;
2738 }
2739
2740 static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
2741 {
2742 struct tls_sw_context_rx *sw_ctx_rx;
2743
2744 if (!ctx->priv_ctx_rx) {
2745 sw_ctx_rx = kzalloc_obj(*sw_ctx_rx);
2746 if (!sw_ctx_rx)
2747 return NULL;
2748 } else {
2749 sw_ctx_rx = ctx->priv_ctx_rx;
2750 }
2751
2752 crypto_init_wait(&sw_ctx_rx->async_wait);
2753 atomic_set(&sw_ctx_rx->decrypt_pending, 1);
2754 init_waitqueue_head(&sw_ctx_rx->wq);
2755 skb_queue_head_init(&sw_ctx_rx->rx_list);
2756 skb_queue_head_init(&sw_ctx_rx->async_hold);
2757
2758 return sw_ctx_rx;
2759 }
2760
2761 int init_prot_info(struct tls_prot_info *prot,
2762 const struct tls_crypto_info *crypto_info,
2763 const struct tls_cipher_desc *cipher_desc)
2764 {
2765 u16 nonce_size = cipher_desc->nonce;
2766
2767 if (crypto_info->version == TLS_1_3_VERSION) {
2768 nonce_size = 0;
2769 prot->aad_size = TLS_HEADER_SIZE;
2770 prot->tail_size = 1;
2771 } else {
2772 prot->aad_size = TLS_AAD_SPACE_SIZE;
2773 prot->tail_size = 0;
2774 }
2775
2776 /* Sanity-check the sizes for stack allocations. */
2777 if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
2778 return -EINVAL;
2779
2780 prot->version = crypto_info->version;
2781 prot->cipher_type = crypto_info->cipher_type;
2782 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2783 prot->tag_size = cipher_desc->tag;
2784 prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
2785 prot->iv_size = cipher_desc->iv;
2786 prot->salt_size = cipher_desc->salt;
2787 prot->rec_seq_size = cipher_desc->rec_seq;
2788
2789 return 0;
2790 }
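/* Editorial note: worked sizes for the two common cases (illustrative,
 * AES-GCM-128 assumed). TLS 1.3: nonce_size = 0, so prepend_size = 5,
 * tag_size = 16, tail_size = 1 (the trailing content-type byte), giving
 * overhead_size = 22. TLS 1.2: nonce_size = 8 (explicit nonce on the
 * wire), so prepend_size = 13, tag_size = 16, tail_size = 0, giving
 * overhead_size = 29.
 */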
2791
2792 static void tls_finish_key_update(struct sock *sk, struct tls_context *tls_ctx)
2793 {
2794 struct tls_sw_context_rx *ctx = tls_ctx->priv_ctx_rx;
2795
2796 WRITE_ONCE(ctx->key_update_pending, false);
2797 /* wake-up pre-existing poll() */
2798 ctx->saved_data_ready(sk);
2799 }
2800
2801 int tls_set_sw_offload(struct sock *sk, int tx,
2802 struct tls_crypto_info *new_crypto_info)
2803 {
2804 struct tls_crypto_info *crypto_info, *src_crypto_info;
2805 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2806 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2807 const struct tls_cipher_desc *cipher_desc;
2808 char *iv, *rec_seq, *key, *salt;
2809 struct cipher_context *cctx;
2810 struct tls_prot_info *prot;
2811 struct crypto_aead **aead;
2812 struct tls_context *ctx;
2813 struct crypto_tfm *tfm;
2814 int rc = 0;
2815
2816 ctx = tls_get_ctx(sk);
2817 prot = &ctx->prot_info;
2818
2819 /* new_crypto_info != NULL means rekey */
2820 if (!new_crypto_info) {
2821 if (tx) {
2822 ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
2823 if (!ctx->priv_ctx_tx)
2824 return -ENOMEM;
2825 } else {
2826 ctx->priv_ctx_rx = init_ctx_rx(ctx);
2827 if (!ctx->priv_ctx_rx)
2828 return -ENOMEM;
2829 }
2830 }
2831
2832 if (tx) {
2833 sw_ctx_tx = ctx->priv_ctx_tx;
2834 crypto_info = &ctx->crypto_send.info;
2835 cctx = &ctx->tx;
2836 aead = &sw_ctx_tx->aead_send;
2837 } else {
2838 sw_ctx_rx = ctx->priv_ctx_rx;
2839 crypto_info = &ctx->crypto_recv.info;
2840 cctx = &ctx->rx;
2841 aead = &sw_ctx_rx->aead_recv;
2842 }
2843
2844 src_crypto_info = new_crypto_info ?: crypto_info;
2845
2846 cipher_desc = get_cipher_desc(src_crypto_info->cipher_type);
2847 if (!cipher_desc) {
2848 rc = -EINVAL;
2849 goto free_priv;
2850 }
2851
2852 rc = init_prot_info(prot, src_crypto_info, cipher_desc);
2853 if (rc)
2854 goto free_priv;
2855
2856 iv = crypto_info_iv(src_crypto_info, cipher_desc);
2857 key = crypto_info_key(src_crypto_info, cipher_desc);
2858 salt = crypto_info_salt(src_crypto_info, cipher_desc);
2859 rec_seq = crypto_info_rec_seq(src_crypto_info, cipher_desc);
2860
2861 if (!*aead) {
2862 *aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
2863 if (IS_ERR(*aead)) {
2864 rc = PTR_ERR(*aead);
2865 *aead = NULL;
2866 goto free_priv;
2867 }
2868 }
2869
2870 ctx->push_pending_record = tls_sw_push_pending_record;
2871
2872 /* setkey is the last operation that could fail during a
2873 * rekey. If it succeeds, we can start modifying the
2874 * context.
2875 */
2876 rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
2877 if (rc) {
2878 if (new_crypto_info)
2879 goto out;
2880 else
2881 goto free_aead;
2882 }
2883
2884 if (!new_crypto_info) {
2885 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2886 if (rc)
2887 goto free_aead;
2888 }
2889
2890 if (!tx && !new_crypto_info) {
2891 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2892
2893 tls_update_rx_zc_capable(ctx);
2894 sw_ctx_rx->async_capable =
2895 src_crypto_info->version != TLS_1_3_VERSION &&
2896 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2897
2898 rc = tls_strp_init(&sw_ctx_rx->strp, sk);
2899 if (rc)
2900 goto free_aead;
2901 }
2902
2903 memcpy(cctx->iv, salt, cipher_desc->salt);
2904 memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
2905 memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);
2906
2907 if (new_crypto_info) {
2908 unsafe_memcpy(crypto_info, new_crypto_info,
2909 cipher_desc->crypto_info,
2910 /* size was checked in do_tls_setsockopt_conf */);
2911 memzero_explicit(new_crypto_info, cipher_desc->crypto_info);
2912 if (!tx)
2913 tls_finish_key_update(sk, ctx);
2914 }
2915
2916 goto out;
2917
2918 free_aead:
2919 crypto_free_aead(*aead);
2920 *aead = NULL;
2921 free_priv:
2922 if (!new_crypto_info) {
2923 if (tx) {
2924 kfree(ctx->priv_ctx_tx);
2925 ctx->priv_ctx_tx = NULL;
2926 } else {
2927 kfree(ctx->priv_ctx_rx);
2928 ctx->priv_ctx_rx = NULL;
2929 }
2930 }
2931 out:
2932 return rc;
2933 }
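/* Editorial sketch (not part of the kernel sources): the userspace calls
 * that lead into tls_set_sw_offload() for the initial key installation.
 * Key material comes from the application's TLS handshake, and each
 * direction takes its own keys in practice; see
 * Documentation/networking/tls.rst.
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// fill ci.iv, ci.key, ci.salt and ci.rec_seq from the handshake
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 */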
2934