/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8

/* For CCM mode, the full 16 bytes of IV are made of four fields of the
 * given sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The 'length' field is encoded in 'b0' as '(length width - 1)'.
 * Hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
#define TLS_SM4_CCM_IV_B0_BYTE		2
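
/* Illustrative sketch (not kernel API; 'salt' and 'rec_nonce' are
 * hypothetical buffers): assembling the 16-byte CCM IV laid out above,
 * assuming a 4-byte implicit nonce (salt) and an 8-byte explicit
 * per-record nonce:
 *
 *	u8 iv[16];
 *
 *	iv[0] = TLS_AES_CCM_IV_B0_BYTE;	// b0: (length width - 1) = 2
 *	memcpy(iv + 1, salt, 4);	// implicit nonce
 *	memcpy(iv + 5, rec_nonce, 8);	// explicit nonce
 *	// iv[13..15]: 3-byte record length, filled in by the CCM code
 */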

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state.
	 * Currently the belief is that there is not enough driver specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
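
/* Illustrative sketch (allocation details live in net/tls/tls_device.c):
 * the driver_state[] flexible array is why the context must be sized with
 * the macro above rather than with sizeof():
 *
 *	struct tls_offload_context_tx *offload_ctx;
 *
 *	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
 *	// drivers later reach their private area via tls_driver_ctx()
 */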

enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
		struct tls12_crypto_info_sm4_gcm sm4_gcm;
		struct tls12_crypto_info_sm4_ccm sm4_ccm;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
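
/* Illustrative sketch (hypothetical 'foo' driver): a NIC advertises kTLS
 * offload by implementing tlsdev_ops above and setting the NETIF_F_HW_TLS_*
 * feature bits:
 *
 *	static const struct tlsdev_ops foo_tlsdev_ops = {
 *		.tls_dev_add	= foo_tls_add,		// install keys in HW
 *		.tls_dev_del	= foo_tls_del,		// free per-flow state
 *		.tls_dev_resync	= foo_tls_resync,	// handle resync requests
 *	};
 *
 *	netdev->tlsdev_ops = &foo_tlsdev_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */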

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state.
	 * Currently the belief is that there is not enough driver specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);
void tls_err_abort(struct sock *sk, int err);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
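
/* Illustrative sketch (driver-side, names hypothetical): on a retransmission
 * the TX path maps a TCP sequence number back to its TLS record so the HW
 * can be re-programmed with the right record sequence number. The records
 * list is walked under tx_ctx->lock:
 *
 *	struct tls_record_info *record;
 *	unsigned long flags;
 *	u64 record_sn;
 *
 *	spin_lock_irqsave(&tx_ctx->lock, flags);
 *	record = tls_get_record(tx_ctx, tcp_seq, &record_sn);
 *	if (record && !tls_record_is_start_marker(record))
 *		...	// resync HW using record_sn and record->frags
 *	spin_unlock_irqrestore(&tx_ctx->lock, flags);
 */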

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	/* list_first_entry() never returns NULL, so use the _or_null()
	 * variant to cope with an empty tx_list.
	 */
	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	/* true on overflow: every byte wrapped around to zero */
	return (i == -1);
}

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, -EBADMSG);

	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		tls_bigint_increment(ctx->iv + prot->salt_size,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + prot->salt_size, iv_size);
	}

	/* we cover nonce explicit here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = prot->version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
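
/* Worked example: TLS 1.2 with AES-GCM-128 (tag_size = 16, iv_size = 8,
 * explicit nonce on the wire) and plaintext_len = 100 for an
 * application-data record gives pkt_len = 100 + 16 + 8 = 124, so the
 * 5-byte header is:
 *
 *	buf[0] = 0x17;	// TLS_RECORD_TYPE_DATA
 *	buf[1] = 0x03;	// TLS_1_2_VERSION_MAJOR
 *	buf[2] = 0x03;	// TLS_1_2_VERSION_MINOR
 *	buf[3] = 0x00;	// length, high byte
 *	buf[4] = 0x7c;	// length, low byte (124)
 */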

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				unsigned char record_type,
				struct tls_prot_info *prot)
{
	if (prot->version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, prot->rec_seq_size);
		buf += 8;
	} else {
		size += prot->tag_size;
	}

	buf[0] = prot->version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}

static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
{
	int i;

	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
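
/* Illustrative sketch (hypothetical foo_rx_in_sync() check): a driver that
 * parsed a record header at TCP sequence 'hdr_seq' but has lost crypto state
 * asks the core to verify and resync:
 *
 *	if (unlikely(!foo_rx_in_sync(rq)))
 *		tls_offload_rx_resync_request(sk, htonl(hdr_seq));
 *
 * The sequence is packed into the upper 32 bits of the atomic64 together
 * with RESYNC_REQ, so the core's RX path can consume the request lock-free.
 */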

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
		     struct sk_buff *skb, struct strp_msg *rxm)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */