/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>

#include "tls.h"

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

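/* Compile-time sanity checks: every cipher's IV, salt, rec_seq and tag
 * sizes must fit within the generic limits, and each uAPI crypto_info
 * struct must have fields of exactly the sizes the descriptor advertises.
 */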
#define CHECK_CIPHER_DESC(cipher,ci)					\
	static_assert(cipher ## _IV_SIZE <= TLS_MAX_IV_SIZE);		\
	static_assert(cipher ## _SALT_SIZE <= TLS_MAX_SALT_SIZE);	\
	static_assert(cipher ## _REC_SEQ_SIZE <= TLS_MAX_REC_SEQ_SIZE);	\
	static_assert(cipher ## _TAG_SIZE == TLS_TAG_SIZE);		\
	static_assert(sizeof_field(struct ci, iv) == cipher ## _IV_SIZE);	\
	static_assert(sizeof_field(struct ci, key) == cipher ## _KEY_SIZE);	\
	static_assert(sizeof_field(struct ci, salt) == cipher ## _SALT_SIZE);	\
	static_assert(sizeof_field(struct ci, rec_seq) == cipher ## _REC_SEQ_SIZE);

#define __CIPHER_DESC(ci)					\
	.iv_offset = offsetof(struct ci, iv),			\
	.key_offset = offsetof(struct ci, key),			\
	.salt_offset = offsetof(struct ci, salt),		\
	.rec_seq_offset = offsetof(struct ci, rec_seq),		\
	.crypto_info = sizeof(struct ci)

#define CIPHER_DESC(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = {	\
	.nonce = cipher ## _IV_SIZE,		\
	.iv = cipher ## _IV_SIZE,		\
	.key = cipher ## _KEY_SIZE,		\
	.salt = cipher ## _SALT_SIZE,		\
	.tag = cipher ## _TAG_SIZE,		\
	.rec_seq = cipher ## _REC_SEQ_SIZE,	\
	.cipher_name = algname,			\
	.offloadable = _offloadable,		\
	__CIPHER_DESC(ci),			\
}

#define CIPHER_DESC_NONCE0(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
	.nonce = 0,				\
	.iv = cipher ## _IV_SIZE,		\
	.key = cipher ## _KEY_SIZE,		\
	.salt = cipher ## _SALT_SIZE,		\
	.tag = cipher ## _TAG_SIZE,		\
	.rec_seq = cipher ## _REC_SEQ_SIZE,	\
	.cipher_name = algname,			\
	.offloadable = _offloadable,		\
	__CIPHER_DESC(ci),			\
}

const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
	CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128, "gcm(aes)", true),
	CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256, "gcm(aes)", true),
	CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128, "ccm(aes)", false),
	CIPHER_DESC_NONCE0(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305, "rfc7539(chacha20,poly1305)", false),
	CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm, "gcm(sm4)", false),
	CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm, "ccm(sm4)", false),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128, "gcm(aria)", false),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256, "gcm(aria)", false),
};

CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256);

static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base);

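/* Point the socket at the proto and proto_ops variants matching its
 * address family and the current TX/RX configuration.
 */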
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	WRITE_ONCE(sk->sk_prot,
		   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
	WRITE_ONCE(sk->sk_socket->ops,
		   &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}

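/* Wait until there are no pending writes on @sk, honouring the timeout
 * and any pending signal.
 */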
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret, rc = 0;

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		ret = sk_wait_event(sk, timeo,
				    !READ_ONCE(sk->sk_write_pending), &wait);
		if (ret) {
			if (ret < 0)
				rc = ret;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

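/* Transmit a record described by a scatterlist, starting @first_offset
 * bytes into the first entry. Pages are handed to TCP via
 * MSG_SPLICE_PAGES; on a partial send the rest of the record is parked
 * in ctx->partially_sent_record so it can be retried later.
 */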
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_SPLICE_PAGES | flags,
	};
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->splicing_pages = true;
	while (1) {
		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		bvec_set_page(&bvec, p, size, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

		ret = tcp_sendmsg_locked(sk, &msg, size);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->splicing_pages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->splicing_pages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

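/* Parse SOL_TLS control messages passed to sendmsg(). TLS_SET_RECORD_TYPE
 * closes any pending open record and sets the record type for the data
 * that follows; it cannot be combined with MSG_MORE.
 */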
int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
		     unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

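/* Resume transmission of a record that tls_push_sg() could only send
 * partially, consuming the saved scatterlist and offset.
 */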
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);
	}
	ctx->partially_sent_record = NULL;
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If splicing_pages is set, call the lower protocol's write space
	 * handler to ensure we wake up any operations waiting there, for
	 * example if splicing pages were to call sk_wait_event.
	 */
	if (ctx->splicing_pages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk: socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	mutex_destroy(&ctx->tx_lock);

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}

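/* Flush any open record, then release the TX and RX resources of the
 * current configuration and decrement the matching MIB counters.
 */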
static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		tls_sw_release_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
	}

	if (ctx->rx_conf == TLS_SW) {
		tls_sw_release_resources_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
	} else if (ctx->rx_conf == TLS_HW) {
		tls_device_offload_cleanup_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
	}
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}

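/* poll() for sockets with software RX: start from tcp_poll(), then clear
 * EPOLLIN/EPOLLRDNORM unless a record is actually ready for the reader,
 * or while a key update is pending.
 */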
static __poll_t tls_sk_poll(struct file *file, struct socket *sock,
			    struct poll_table_struct *wait)
{
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct sock *sk = sock->sk;
	struct sk_psock *psock;
	__poll_t mask = 0;
	u8 shutdown;
	int state;

	mask = tcp_poll(file, sock, wait);

	state = inet_sk_state_load(sk);
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (unlikely(state != TCP_ESTABLISHED || shutdown & RCV_SHUTDOWN))
		return mask;

	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	psock = sk_psock_get(sk);

	if ((skb_queue_empty_lockless(&ctx->rx_list) &&
	     !tls_strp_msg_ready(ctx) &&
	     sk_psock_queue_empty(psock)) ||
	    READ_ONCE(ctx->key_update_pending))
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	if (psock)
		sk_psock_put(sk, psock);

	return mask;
}

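/* Copy one direction's crypto parameters back to user space. A buffer of
 * just sizeof(tls_crypto_info) receives only version and cipher type; a
 * full-sized buffer also gets the current IV and record sequence number.
 */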
static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
				  int __user *optlen, int tx)
{
	int rc = 0;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	struct cipher_context *cctx;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
	}

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc || len != cipher_desc->crypto_info) {
		rc = -EINVAL;
		goto out;
	}

	memcpy(crypto_info_iv(crypto_info, cipher_desc),
	       cctx->iv + cipher_desc->salt, cipher_desc->iv);
	memcpy(crypto_info_rec_seq(crypto_info, cipher_desc),
	       cctx->rec_seq, cipher_desc->rec_seq);

	if (copy_to_user(optval, crypto_info, cipher_desc->crypto_info))
		rc = -EFAULT;

out:
	return rc;
}

static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval,
				   int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	unsigned int value;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len != sizeof(value))
		return -EINVAL;

	value = ctx->zerocopy_sendfile;
	if (copy_to_user(optval, &value, sizeof(value)))
		return -EFAULT;

	return 0;
}

static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
				    int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	int value, len;

	if (ctx->prot_info.version != TLS_1_3_VERSION)
		return -EINVAL;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < sizeof(value))
		return -EINVAL;

	value = -EINVAL;
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		value = ctx->rx_no_pad;
	if (value < 0)
		return value;

	if (put_user(sizeof(value), optlen))
		return -EFAULT;
	if (copy_to_user(optval, &value, sizeof(value)))
		return -EFAULT;

	return 0;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	lock_sock(sk);

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		rc = do_tls_getsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		break;
	case TLS_TX_ZEROCOPY_RO:
		rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
		break;
	case TLS_RX_EXPECT_NO_PAD:
		rc = do_tls_getsockopt_no_pad(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);

	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int validate_crypto_info(const struct tls_crypto_info *crypto_info,
				const struct tls_crypto_info *alt_crypto_info)
{
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION)
		return -EINVAL;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_ARIA_GCM_128:
	case TLS_CIPHER_ARIA_GCM_256:
		if (crypto_info->version != TLS_1_2_VERSION)
			return -EINVAL;
		break;
	}

	/* Ensure that TLS version and cipher are the same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type)
			return -EINVAL;
	}

	return 0;
}

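/* Install (or, for TLS 1.3, rekey) the crypto state of one direction.
 * Device offload is attempted first; if it fails we fall back to the
 * software implementation before giving up.
 */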
static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info, *alt_crypto_info;
	struct tls_crypto_info *old_crypto_info = NULL;
	struct tls_context *ctx = tls_get_ctx(sk);
	const struct tls_cipher_desc *cipher_desc;
	union tls_crypto_context *crypto_ctx;
	union tls_crypto_context tmp = {};
	bool update = false;
	int rc = 0;
	int conf;

	if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info)))
		return -EINVAL;

	if (tx) {
		crypto_ctx = &ctx->crypto_send;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_ctx = &ctx->crypto_recv;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	crypto_info = &crypto_ctx->info;

	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		/* Setting the crypto info more than once (rekeying) is
		 * currently supported only for TLS 1.3.
		 */
		if (crypto_info->version != TLS_1_3_VERSION) {
			TLS_INC_STATS(sock_net(sk), tx ? LINUX_MIB_TLSTXREKEYERROR
						       : LINUX_MIB_TLSRXREKEYERROR);
			return -EBUSY;
		}

		update = true;
		old_crypto_info = crypto_info;
		crypto_info = &tmp.info;
		crypto_ctx = &tmp;
	}

	rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (update) {
		/* Ensure that TLS version and ciphers are not modified */
		if (crypto_info->version != old_crypto_info->version ||
		    crypto_info->cipher_type != old_crypto_info->cipher_type)
			rc = -EINVAL;
	} else {
		rc = validate_crypto_info(crypto_info, alt_crypto_info);
	}
	if (rc)
		goto err_crypto_info;

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != cipher_desc->crypto_info) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	rc = copy_from_sockptr_offset(crypto_info + 1, optval,
				      sizeof(*crypto_info),
				      optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, 1,
						update ? crypto_info : NULL);
			if (rc)
				goto err_crypto_info;

			if (update) {
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXREKEYOK);
			} else {
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
			}
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, 0,
						update ? crypto_info : NULL);
			if (rc)
				goto err_crypto_info;

			if (update) {
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYOK);
			} else {
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
			}
			conf = TLS_SW;
		}
		if (!update)
			tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);

	if (update)
		return 0;

	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(ctx);

		tls_strp_check_rcv(&rx_ctx->strp);
	}
	return 0;

err_crypto_info:
	if (update) {
		TLS_INC_STATS(sock_net(sk), tx ? LINUX_MIB_TLSTXREKEYERROR
					       : LINUX_MIB_TLSRXREKEYERROR);
	}
	memzero_explicit(crypto_ctx, sizeof(*crypto_ctx));
	return rc;
}

static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval,
				   unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	unsigned int value;

	if (sockptr_is_null(optval) || optlen != sizeof(value))
		return -EINVAL;

	if (copy_from_sockptr(&value, optval, sizeof(value)))
		return -EFAULT;

	if (value > 1)
		return -EINVAL;

	ctx->zerocopy_sendfile = value;

	return 0;
}

static int do_tls_setsockopt_no_pad(struct sock *sk, sockptr_t optval,
				    unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	u32 val;
	int rc;

	if (ctx->prot_info.version != TLS_1_3_VERSION ||
	    sockptr_is_null(optval) || optlen < sizeof(val))
		return -EINVAL;

	rc = copy_from_sockptr(&val, optval, sizeof(val));
	if (rc)
		return -EFAULT;
	if (val > 1)
		return -EINVAL;
	rc = check_zeroed_sockptr(optval, sizeof(val), optlen - sizeof(val));
	if (rc < 1)
		return rc == 0 ? -EINVAL : rc;

	lock_sock(sk);
	rc = -EINVAL;
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) {
		ctx->rx_no_pad = val;
		tls_update_rx_zc_capable(ctx);
		rc = 0;
	}
	release_sock(sk);

	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			     unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	case TLS_TX_ZEROCOPY_RO:
		lock_sock(sk);
		rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
		release_sock(sk);
		break;
	case TLS_RX_EXPECT_NO_PAD:
		rc = do_tls_setsockopt_no_pad(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

struct tls_context *tls_ctx_create(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	mutex_init(&ctx->tx_lock);
	ctx->sk_proto = READ_ONCE(sk->sk_prot);
	ctx->sk = sk;
	/* The release semantic of rcu_assign_pointer() ensures that
	 * ctx->sk_proto is visible before changing sk->sk_prot in
	 * update_sk_prot(), and prevents reading an uninitialized value in
	 * tls_{getsockopt, setsockopt}. Note that we do not need a
	 * read barrier in tls_{getsockopt,setsockopt} as there is an
	 * address dependency between sk->sk_proto->{getsockopt,setsockopt}
	 * and ctx->sk_proto.
	 */
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	return ctx;
}

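/* Derive the proto_ops variant for every TX/RX configuration from the
 * base (plain TCP) ops, overriding only the hooks TLS implements.
 */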
static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			    const struct proto_ops *base)
{
	ops[TLS_BASE][TLS_BASE] = *base;

	ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_SW  ][TLS_BASE].splice_eof = tls_sw_splice_eof;

	ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_BASE][TLS_SW  ].splice_read = tls_sw_splice_read;
	ops[TLS_BASE][TLS_SW  ].poll = tls_sk_poll;
	ops[TLS_BASE][TLS_SW  ].read_sock = tls_sw_read_sock;

	ops[TLS_SW  ][TLS_SW  ] = ops[TLS_SW  ][TLS_BASE];
	ops[TLS_SW  ][TLS_SW  ].splice_read = tls_sw_splice_read;
	ops[TLS_SW  ][TLS_SW  ].poll = tls_sk_poll;
	ops[TLS_SW  ][TLS_SW  ].read_sock = tls_sw_read_sock;

#ifdef CONFIG_TLS_DEVICE
	ops[TLS_HW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];

	ops[TLS_HW  ][TLS_SW  ] = ops[TLS_BASE][TLS_SW  ];

	ops[TLS_BASE][TLS_HW  ] = ops[TLS_BASE][TLS_SW  ];

	ops[TLS_SW  ][TLS_HW  ] = ops[TLS_SW  ][TLS_SW  ];

	ops[TLS_HW  ][TLS_HW  ] = ops[TLS_HW  ][TLS_SW  ];
#endif
#ifdef CONFIG_TLS_TOE
	ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
#endif
}

static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct proto *prot = READ_ONCE(sk->sk_prot);

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], prot);
			build_proto_ops(tls_proto_ops[TLSV6],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv6_prot, prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], prot);
			build_proto_ops(tls_proto_ops[TLSV4],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv4_prot, prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

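/* Likewise for struct proto: each [tx_conf][rx_conf] slot starts from the
 * base TCP proto and overrides sendmsg/recvmsg/close as needed.
 */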
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].splice_eof = tls_device_splice_eof;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].splice_eof = tls_device_splice_eof;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash;
#endif
}

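/* ULP init hook, invoked when the socket is attached via the TCP_ULP
 * socket option. Allocates the TLS context and switches the socket to
 * the TLS_BASE proto; crypto state is installed later via TLS_TX/TLS_RX.
 */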
static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
	if (tls_toe_bypass(sk))
		return 0;
#endif

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = tls_ctx_create(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}

static void tls_update(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk))
{
	struct tls_context *ctx;

	WARN_ON_ONCE(sk->sk_prot == p);

	ctx = tls_get_ctx(sk);
	if (likely(ctx)) {
		ctx->sk_write_space = write_space;
		ctx->sk_proto = p;
	} else {
		/* Pairs with lockless read in sk_clone_lock(). */
		WRITE_ONCE(sk->sk_prot, p);
		sk->sk_write_space = write_space;
	}
}

static u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

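/* Fill the INET_ULP_INFO_TLS nest for inet_diag with the TLS version,
 * cipher and the TX/RX configuration currently in effect.
 */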
static int tls_get_info(struct sock *sk, struct sk_buff *skb, bool net_admin)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
		err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
		if (err)
			goto nla_failure;
	}
	if (ctx->rx_no_pad) {
		err = nla_put_flag(skb, TLS_INFO_RX_NO_PAD);
		if (err)
			goto nla_failure;
	}

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}

static size_t tls_get_info_size(const struct sock *sk, bool net_admin)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		nla_total_size(0) +		/* TLS_INFO_ZC_RO_TX */
		nla_total_size(0) +		/* TLS_INFO_RX_NO_PAD */
		0;

	return size;
}

static int __net_init tls_init_net(struct net *net)
{
	int err;

	net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
	if (!net->mib.tls_statistics)
		return -ENOMEM;

	err = tls_proc_init(net);
	if (err)
		goto err_free_stats;

	return 0;
err_free_stats:
	free_percpu(net->mib.tls_statistics);
	return err;
}

static void __net_exit tls_exit_net(struct net *net)
{
	tls_proc_fini(net);
	free_percpu(net->mib.tls_statistics);
}

static struct pernet_operations tls_proc_ops = {
	.init = tls_init_net,
	.exit = tls_exit_net,
};

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.owner = THIS_MODULE,
	.init = tls_init,
	.update = tls_update,
	.get_info = tls_get_info,
	.get_info_size = tls_get_info_size,
};

static int __init tls_register(void)
{
	int err;

	err = register_pernet_subsys(&tls_proc_ops);
	if (err)
		return err;

	err = tls_strp_dev_init();
	if (err)
		goto err_pernet;

	err = tls_device_init();
	if (err)
		goto err_strp;

	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
err_strp:
	tls_strp_dev_exit();
err_pernet:
	unregister_pernet_subsys(&tls_proc_ops);
	return err;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_strp_dev_exit();
	tls_device_cleanup();
	unregister_pernet_subsys(&tls_proc_ops);
}

module_init(tls_register);
module_exit(tls_unregister);