// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */

#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/workqueue.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/tls.h>

#include "tls.h"

static struct workqueue_struct *tls_strp_wq;

static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
	if (strp->stopped)
		return;

	strp->stopped = 1;

	/* Report an error on the lower socket */
	WRITE_ONCE(strp->sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(strp->sk);
}

static void tls_strp_anchor_free(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
	if (!strp->copy_mode)
		shinfo->frag_list = NULL;
	consume_skb(strp->anchor);
	strp->anchor = NULL;
}

static struct sk_buff *
tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
		  int offset, int len)
{
	struct sk_buff *skb;
	int i, err;

	skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
				   &err, strp->sk->sk_allocation);
	if (!skb)
		return NULL;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag),
					   skb_frag_size(frag)));
		offset += skb_frag_size(frag);
	}

	skb->len = len;
	skb->data_len = len;
	skb_copy_header(skb, in_skb);
	return skb;
}

/* Create a new skb with the contents of input copied to its page frags */
static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
{
	struct strp_msg *rxm;
	struct sk_buff *skb;

	skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
				strp->stm.full_len);
	if (!skb)
		return NULL;

	rxm = strp_msg(skb);
	rxm->offset = 0;
	return skb;
}

/* Steal the input skb, input msg is invalid after calling this function */
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;

#ifdef CONFIG_TLS_DEVICE
	DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
#else
	/* This function turns an input into an output;
	 * that can only happen if we have offload.
	 */
	WARN_ON(1);
#endif

	if (strp->copy_mode) {
		struct sk_buff *skb;

		/* Replace anchor with an empty skb, this is a little
		 * dangerous but __tls_cur_msg() warns on empty skbs
		 * so hopefully we'll catch abuses.
		 */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return NULL;

		swap(strp->anchor, skb);
		return skb;
	}

	return tls_strp_msg_make_copy(strp);
}

/* Force the input skb to be in copy mode. The data ownership remains
 * with the input skb itself (meaning unpause will wipe it) but it can
 * be modified.
 */
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;
	struct sk_buff *skb;

	if (strp->copy_mode)
		return 0;

	skb = tls_strp_msg_make_copy(strp);
	if (!skb)
		return -ENOMEM;

	tls_strp_anchor_free(strp);
	strp->anchor = skb;

	tcp_read_done(strp->sk, strp->stm.full_len);
	strp->copy_mode = 1;

	return 0;
}

/* Make a clone (in the skb sense) of the input msg to keep a reference
 * to the underlying data. The reference-holding skbs get placed on
 * @dst.
 */
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	if (strp->copy_mode) {
		struct sk_buff *skb;

		WARN_ON_ONCE(!shinfo->nr_frags);

		/* We can't skb_clone() the anchor, it gets wiped by unpause */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return -ENOMEM;

		__skb_queue_tail(dst, strp->anchor);
		strp->anchor = skb;
	} else {
		struct sk_buff *iter, *clone;
		int chunk, len, offset;

		offset = strp->stm.offset;
		len = strp->stm.full_len;
		iter = shinfo->frag_list;

		while (len > 0) {
			if (iter->len <= offset) {
				offset -= iter->len;
				goto next;
			}

			chunk = iter->len - offset;
			offset = 0;

			clone = skb_clone(iter, strp->sk->sk_allocation);
			if (!clone)
				return -ENOMEM;
			__skb_queue_tail(dst, clone);

			len -= chunk;
next:
			iter = iter->next;
		}
	}

	return 0;
}

static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
	int i;

	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], false);
	shinfo->nr_frags = 0;
	if (strp->copy_mode) {
		kfree_skb_list(shinfo->frag_list);
		shinfo->frag_list = NULL;
	}
	strp->copy_mode = 0;
	strp->mixed_decrypted = 0;
}

static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
				struct sk_buff *in_skb, unsigned int offset,
				size_t in_len)
{
	size_t len, chunk;
	skb_frag_t *frag;
	int sz;

	frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];

	len = in_len;
	/* First make sure we got the header */
	if (!strp->stm.full_len) {
		/* Assume one page is more than enough for headers */
		chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag) +
					   skb_frag_size(frag),
					   chunk));

		skb->len += chunk;
		skb->data_len += chunk;
		skb_frag_size_add(frag, chunk);

		sz = tls_rx_msg_size(strp, skb);
		if (sz < 0)
			return sz;

		/* We may have over-read, sz == 0 is guaranteed under-read */
		if (unlikely(sz && sz < skb->len)) {
			int over = skb->len - sz;

			WARN_ON_ONCE(over > chunk);
			skb->len -= over;
			skb->data_len -= over;
			skb_frag_size_add(frag, -over);

			chunk -= over;
		}

		frag++;
		len -= chunk;
		offset += chunk;

		strp->stm.full_len = sz;
		if (!strp->stm.full_len)
			goto read_done;
	}

	/* Load up more data */
	while (len && strp->stm.full_len > skb->len) {
		chunk = min_t(size_t, len, strp->stm.full_len - skb->len);
		chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
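		/* Copy the next chunk of payload into the tail of the current frag */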
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag) +
					   skb_frag_size(frag),
					   chunk));

		skb->len += chunk;
		skb->data_len += chunk;
		skb_frag_size_add(frag, chunk);
		frag++;
		len -= chunk;
		offset += chunk;
	}

read_done:
	return in_len - len;
}

static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
			       struct sk_buff *in_skb, unsigned int offset,
			       size_t in_len)
{
	struct sk_buff *nskb, *first, *last;
	struct skb_shared_info *shinfo;
	size_t chunk;
	int sz;

	if (strp->stm.full_len)
		chunk = strp->stm.full_len - skb->len;
	else
		chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
	chunk = min(chunk, in_len);

	nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
	if (!nskb)
		return -ENOMEM;

	shinfo = skb_shinfo(skb);
	if (!shinfo->frag_list) {
		shinfo->frag_list = nskb;
		nskb->prev = nskb;
	} else {
		first = shinfo->frag_list;
		last = first->prev;
		last->next = nskb;
		first->prev = nskb;
	}

	skb->len += chunk;
	skb->data_len += chunk;

	if (!strp->stm.full_len) {
		sz = tls_rx_msg_size(strp, skb);
		if (sz < 0)
			return sz;

		/* We may have over-read, sz == 0 is guaranteed under-read */
		if (unlikely(sz && sz < skb->len)) {
			int over = skb->len - sz;

			WARN_ON_ONCE(over > chunk);
			skb->len -= over;
			skb->data_len -= over;
			__pskb_trim(nskb, nskb->len - over);

			chunk -= over;
		}

		strp->stm.full_len = sz;
	}

	return chunk;
}

static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
			   unsigned int offset, size_t in_len)
{
	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
	struct sk_buff *skb;
	int ret;

	if (strp->msg_ready)
		return 0;

	skb = strp->anchor;
	if (!skb->len)
		skb_copy_decrypted(skb, in_skb);
	else
		strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
		ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
	else
		ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
	if (ret < 0) {
		desc->error = ret;
		ret = 0;
	}

	if (strp->stm.full_len && strp->stm.full_len == skb->len) {
		desc->count = 0;

		WRITE_ONCE(strp->msg_ready, 1);
		tls_rx_msg_ready(strp);
	}

	return ret;
}

static int tls_strp_read_copyin(struct tls_strparser *strp)
{
	read_descriptor_t desc;

	desc.arg.data = strp;
	desc.error = 0;
	desc.count = 1; /* give more than one skb per call */

	/* sk should be locked here, so okay to do read_sock */
	tcp_read_sock(strp->sk, &desc, tls_strp_copyin);

	return desc.error;
}

static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
{
	struct skb_shared_info *shinfo;
	struct page *page;
	int need_spc, len;

	/* If the rbuf is small or rcv window has collapsed to 0 we need
	 * to read the data out. Otherwise the connection will stall.
	 * Without pressure, a threshold of INT_MAX will never be ready.
	 */
	if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
		return 0;

	shinfo = skb_shinfo(strp->anchor);
	shinfo->frag_list = NULL;

	/* If we don't know the length go max plus page for cipher overhead */
	need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;

	for (len = need_spc; len > 0; len -= PAGE_SIZE) {
		page = alloc_page(strp->sk->sk_allocation);
		if (!page) {
			tls_strp_flush_anchor_copy(strp);
			return -ENOMEM;
		}

		skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
				   page, 0, 0);
	}

	strp->copy_mode = 1;
	strp->stm.offset = 0;

	strp->anchor->len = 0;
	strp->anchor->data_len = 0;
	strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);

	tls_strp_read_copyin(strp);

	return 0;
}

static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
{
	unsigned int len = strp->stm.offset + strp->stm.full_len;
	struct sk_buff *first, *skb;
	u32 seq;

	first = skb_shinfo(strp->anchor)->frag_list;
	skb = first;
	seq = TCP_SKB_CB(first)->seq;

	/* Make sure there's no duplicate data in the queue,
	 * and the decrypted status matches.
	 */
	while (skb->len < len) {
		seq += skb->len;
		len -= skb->len;
		skb = skb->next;

		if (TCP_SKB_CB(skb)->seq != seq)
			return false;
		if (skb_cmp_decrypted(first, skb))
			return false;
	}

	return true;
}

static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
	struct tcp_sock *tp = tcp_sk(strp->sk);
	struct sk_buff *first;
	u32 offset;

	first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
	if (WARN_ON_ONCE(!first))
		return;

	/* Bestow the state onto the anchor */
	strp->anchor->len = offset + len;
	strp->anchor->data_len = offset + len;
	strp->anchor->truesize = offset + len;

	skb_shinfo(strp->anchor)->frag_list = first;

	skb_copy_header(strp->anchor, first);
	strp->anchor->destructor = NULL;

	strp->stm.offset = offset;
}

void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
	struct strp_msg *rxm;
	struct tls_msg *tlm;

	DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
	DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);

	if (!strp->copy_mode && force_refresh) {
		if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
			return;

		tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
	}

	rxm = strp_msg(strp->anchor);
	rxm->full_len = strp->stm.full_len;
	rxm->offset = strp->stm.offset;
	tlm = tls_msg(strp->anchor);
	tlm->control = strp->mark;
}

/* Called with lock held on lower socket */
static int tls_strp_read_sock(struct tls_strparser *strp)
{
	int sz, inq;

	inq = tcp_inq(strp->sk);
	if (inq < 1)
		return 0;

	if (unlikely(strp->copy_mode))
		return tls_strp_read_copyin(strp);

	if (inq < strp->stm.full_len)
		return tls_strp_read_copy(strp, true);

	if (!strp->stm.full_len) {
		tls_strp_load_anchor_with_queue(strp, inq);

		sz = tls_rx_msg_size(strp, strp->anchor);
		if (sz < 0) {
			tls_strp_abort_strp(strp, sz);
			return sz;
		}

		strp->stm.full_len = sz;

		if (!strp->stm.full_len || inq < strp->stm.full_len)
			return tls_strp_read_copy(strp, true);
	}

	if (!tls_strp_check_queue_ok(strp))
		return tls_strp_read_copy(strp, false);

	WRITE_ONCE(strp->msg_ready, 1);
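	/* A complete record sits in the TCP queue, notify the upper layer */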
	tls_rx_msg_ready(strp);

	return 0;
}

void tls_strp_check_rcv(struct tls_strparser *strp)
{
	if (unlikely(strp->stopped) || strp->msg_ready)
		return;

	if (tls_strp_read_sock(strp) == -ENOMEM)
		queue_work(tls_strp_wq, &strp->work);
}

/* Lower sock lock held */
void tls_strp_data_ready(struct tls_strparser *strp)
{
	/* This check is needed to synchronize with do_tls_strp_work.
	 * do_tls_strp_work acquires a process lock (lock_sock) whereas
	 * the lock held here is bh_lock_sock. The two locks can be
	 * held by different threads at the same time, but bh_lock_sock
	 * allows a thread in BH context to safely check if the process
	 * lock is held. In this case, if the lock is held, queue work.
	 */
	if (sock_owned_by_user_nocheck(strp->sk)) {
		queue_work(tls_strp_wq, &strp->work);
		return;
	}

	tls_strp_check_rcv(strp);
}

static void tls_strp_work(struct work_struct *w)
{
	struct tls_strparser *strp =
		container_of(w, struct tls_strparser, work);

	lock_sock(strp->sk);
	tls_strp_check_rcv(strp);
	release_sock(strp->sk);
}

void tls_strp_msg_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stm.full_len);

	if (likely(!strp->copy_mode))
		tcp_read_done(strp->sk, strp->stm.full_len);
	else
		tls_strp_flush_anchor_copy(strp);

	WRITE_ONCE(strp->msg_ready, 0);
	memset(&strp->stm, 0, sizeof(strp->stm));

	tls_strp_check_rcv(strp);
}

void tls_strp_stop(struct tls_strparser *strp)
{
	strp->stopped = 1;
}

int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
{
	memset(strp, 0, sizeof(*strp));

	strp->sk = sk;

	strp->anchor = alloc_skb(0, GFP_KERNEL);
	if (!strp->anchor)
		return -ENOMEM;

	INIT_WORK(&strp->work, tls_strp_work);

	return 0;
}

/* strp must already be stopped so that tls_strp_recv will no longer be called.
 * Note that tls_strp_done is not called with the lower socket held.
 */
void tls_strp_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stopped);

	cancel_work_sync(&strp->work);
	tls_strp_anchor_free(strp);
}

int __init tls_strp_dev_init(void)
{
	tls_strp_wq = create_workqueue("tls-strp");
	if (unlikely(!tls_strp_wq))
		return -ENOMEM;

	return 0;
}

void tls_strp_dev_exit(void)
{
	destroy_workqueue(tls_strp_wq);
}