xref: /linux/net/tls/tls_device.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /* Copyright (c) 2018, Mellanox Technologies All rights reserved.
2  *
3  * This software is available to you under a choice of one of two
4  * licenses.  You may choose to be licensed under the terms of the GNU
5  * General Public License (GPL) Version 2, available from the file
6  * COPYING in the main directory of this source tree, or the
7  * OpenIB.org BSD license below:
8  *
9  *     Redistribution and use in source and binary forms, with or
10  *     without modification, are permitted provided that the following
11  *     conditions are met:
12  *
13  *      - Redistributions of source code must retain the above
14  *        copyright notice, this list of conditions and the following
15  *        disclaimer.
16  *
17  *      - Redistributions in binary form must reproduce the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer in the documentation and/or other materials
20  *        provided with the distribution.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29  * SOFTWARE.
30  */
31 
32 #include <crypto/aead.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/netdevice.h>
36 #include <net/dst.h>
37 #include <net/inet_connection_sock.h>
38 #include <net/tcp.h>
39 #include <net/tls.h>
40 #include <linux/skbuff_ref.h>
41 
42 #include "tls.h"
43 #include "trace.h"
44 
45 /* device_offload_lock is used to synchronize tls_dev_add
46  * against NETDEV_DOWN notifications.
47  */
48 static DECLARE_RWSEM(device_offload_lock);
49 
50 static struct workqueue_struct *destruct_wq __read_mostly;
51 
52 static LIST_HEAD(tls_device_list);
53 static LIST_HEAD(tls_device_down_list);
54 static DEFINE_SPINLOCK(tls_device_lock);
55 
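/* Shared placeholder page: when tls_device_record_close() cannot allocate
 * a real page fragment for the authentication tag, the record's final
 * fragment points at this page instead; the device fills in the actual
 * tag bytes when it encrypts the record on transmit.
 */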
56 static struct page *dummy_page;
57 
58 static void tls_device_free_ctx(struct tls_context *ctx)
59 {
60 	if (ctx->tx_conf == TLS_HW)
61 		kfree(tls_offload_ctx_tx(ctx));
62 
63 	if (ctx->rx_conf == TLS_HW)
64 		kfree(tls_offload_ctx_rx(ctx));
65 
66 	tls_ctx_free(NULL, ctx);
67 }
68 
69 static void tls_device_tx_del_task(struct work_struct *work)
70 {
71 	struct tls_offload_context_tx *offload_ctx =
72 		container_of(work, struct tls_offload_context_tx, destruct_work);
73 	struct tls_context *ctx = offload_ctx->ctx;
74 	struct net_device *netdev;
75 
76 	/* Safe, because this is the destroy flow, refcount is 0, so
77 	 * tls_device_down can't store this field in parallel.
78 	 */
79 	netdev = rcu_dereference_protected(ctx->netdev,
80 					   !refcount_read(&ctx->refcount));
81 
82 	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
83 	dev_put(netdev);
84 	ctx->netdev = NULL;
85 	tls_device_free_ctx(ctx);
86 }
87 
88 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
89 {
90 	struct net_device *netdev;
91 	unsigned long flags;
92 	bool async_cleanup;
93 
94 	spin_lock_irqsave(&tls_device_lock, flags);
95 	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
96 		spin_unlock_irqrestore(&tls_device_lock, flags);
97 		return;
98 	}
99 
100 	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
101 
102 	/* Safe, because this is the destroy flow, refcount is 0, so
103 	 * tls_device_down can't store this field in parallel.
104 	 */
105 	netdev = rcu_dereference_protected(ctx->netdev,
106 					   !refcount_read(&ctx->refcount));
107 
108 	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
109 	if (async_cleanup) {
110 		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
111 
112 		/* queue_work inside the spinlock
113 		 * to make sure tls_device_down waits for that work.
114 		 */
115 		queue_work(destruct_wq, &offload_ctx->destruct_work);
116 	}
117 	spin_unlock_irqrestore(&tls_device_lock, flags);
118 
119 	if (!async_cleanup)
120 		tls_device_free_ctx(ctx);
121 }
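
/* TX teardown is deferred to destruct_wq rather than done inline above,
 * presumably because tls_dev_del() may sleep while the final reference
 * can be dropped from a context where sleeping is not allowed (note the
 * work is queued under tls_device_lock with IRQs off). Contexts without
 * TX offload are freed synchronously instead.
 */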
122 
123 /* We assume that the socket is already connected */
124 static struct net_device *get_netdev_for_sock(struct sock *sk)
125 {
126 	struct net_device *dev, *lowest_dev = NULL;
127 	struct dst_entry *dst;
128 
129 	rcu_read_lock();
130 	dst = __sk_dst_get(sk);
131 	dev = dst ? dst_dev_rcu(dst) : NULL;
132 	if (likely(dev)) {
133 		lowest_dev = netdev_sk_get_lowest_dev(dev, sk);
134 		dev_hold(lowest_dev);
135 	}
136 	rcu_read_unlock();
137 
138 	return lowest_dev;
139 }
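
/* The route's output device may be a stacked device (e.g. a bonding
 * master); netdev_sk_get_lowest_dev() walks down to the lower device so
 * the offload is requested from the netdev that will actually transmit.
 */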
140 
141 static void destroy_record(struct tls_record_info *record)
142 {
143 	int i;
144 
145 	for (i = 0; i < record->num_frags; i++)
146 		__skb_frag_unref(&record->frags[i], false);
147 	kfree(record);
148 }
149 
150 static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
151 {
152 	struct tls_record_info *info, *temp;
153 
154 	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
155 		list_del(&info->list);
156 		destroy_record(info);
157 	}
158 
159 	offload_ctx->retransmit_hint = NULL;
160 }
161 
162 static void tls_tcp_clean_acked(struct sock *sk, u32 acked_seq)
163 {
164 	struct tls_context *tls_ctx = tls_get_ctx(sk);
165 	struct tls_record_info *info, *temp;
166 	struct tls_offload_context_tx *ctx;
167 	u64 deleted_records = 0;
168 	unsigned long flags;
169 
170 	if (!tls_ctx)
171 		return;
172 
173 	ctx = tls_offload_ctx_tx(tls_ctx);
174 
175 	spin_lock_irqsave(&ctx->lock, flags);
176 	info = ctx->retransmit_hint;
177 	if (info && !before(acked_seq, info->end_seq))
178 		ctx->retransmit_hint = NULL;
179 
180 	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
181 		if (before(acked_seq, info->end_seq))
182 			break;
183 		list_del(&info->list);
184 
185 		destroy_record(info);
186 		deleted_records++;
187 	}
188 
189 	ctx->unacked_record_sn += deleted_records;
190 	spin_unlock_irqrestore(&ctx->lock, flags);
191 }
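
/* Registered through clean_acked_data_enable() in tls_set_device_offload():
 * TCP invokes this callback as the acknowledged sequence number advances,
 * letting fully ACKed offload records be freed without waiting for the
 * socket to be destroyed.
 */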
192 
193 /* At this point, there should be no references on this
194  * socket and no in-flight SKBs associated with this
195  * socket, so it is safe to free all the resources.
196  */
197 void tls_device_sk_destruct(struct sock *sk)
198 {
199 	struct tls_context *tls_ctx = tls_get_ctx(sk);
200 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
201 
202 	tls_ctx->sk_destruct(sk);
203 
204 	if (tls_ctx->tx_conf == TLS_HW) {
205 		if (ctx->open_record)
206 			destroy_record(ctx->open_record);
207 		delete_all_records(ctx);
208 		crypto_free_aead(ctx->aead_send);
209 		clean_acked_data_disable(tcp_sk(sk));
210 	}
211 
212 	tls_device_queue_ctx_destruction(tls_ctx);
213 }
214 EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
215 
216 void tls_device_free_resources_tx(struct sock *sk)
217 {
218 	struct tls_context *tls_ctx = tls_get_ctx(sk);
219 
220 	tls_free_partial_record(sk, tls_ctx);
221 }
222 
223 void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
224 {
225 	struct tls_context *tls_ctx = tls_get_ctx(sk);
226 
227 	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
228 	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
229 }
230 EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
231 
232 static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
233 				 u32 seq)
234 {
235 	struct net_device *netdev;
236 	int err = 0;
237 	u8 *rcd_sn;
238 
239 	tcp_write_collapse_fence(sk);
240 	rcd_sn = tls_ctx->tx.rec_seq;
241 
242 	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
243 	down_read(&device_offload_lock);
244 	netdev = rcu_dereference_protected(tls_ctx->netdev,
245 					   lockdep_is_held(&device_offload_lock));
246 	if (netdev)
247 		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
248 							 rcd_sn,
249 							 TLS_OFFLOAD_CTX_DIR_TX);
250 	up_read(&device_offload_lock);
251 	if (err)
252 		return;
253 
254 	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
255 }
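
/* TX resync: the driver signals loss of sync by setting TLS_TX_SYNC_SCHED
 * (tls_offload_tx_resync_request() above); on the next record push this
 * hands the current TCP sequence and record sequence number back to the
 * driver. tcp_write_collapse_fence() keeps later data from being merged
 * into skbs already queued before the resync point.
 */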
256 
257 static void tls_append_frag(struct tls_record_info *record,
258 			    struct page_frag *pfrag,
259 			    int size)
260 {
261 	skb_frag_t *frag;
262 
263 	frag = &record->frags[record->num_frags - 1];
264 	if (skb_frag_page(frag) == pfrag->page &&
265 	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
266 		skb_frag_size_add(frag, size);
267 	} else {
268 		++frag;
269 		skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
270 					size);
271 		++record->num_frags;
272 		get_page(pfrag->page);
273 	}
274 
275 	pfrag->offset += size;
276 	record->len += size;
277 }
278 
279 static int tls_push_record(struct sock *sk,
280 			   struct tls_context *ctx,
281 			   struct tls_offload_context_tx *offload_ctx,
282 			   struct tls_record_info *record,
283 			   int flags)
284 {
285 	struct tls_prot_info *prot = &ctx->prot_info;
286 	struct tcp_sock *tp = tcp_sk(sk);
287 	skb_frag_t *frag;
288 	int i;
289 
290 	record->end_seq = tp->write_seq + record->len;
291 	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
292 	offload_ctx->open_record = NULL;
293 
294 	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
295 		tls_device_resync_tx(sk, ctx, tp->write_seq);
296 
297 	tls_advance_record_sn(sk, prot, &ctx->tx);
298 
299 	for (i = 0; i < record->num_frags; i++) {
300 		frag = &record->frags[i];
301 		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
302 		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
303 			    skb_frag_size(frag), skb_frag_off(frag));
304 		sk_mem_charge(sk, skb_frag_size(frag));
305 		get_page(skb_frag_page(frag));
306 	}
307 	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
308 
309 	/* all ready, send */
310 	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
311 }
312 
313 static void tls_device_record_close(struct sock *sk,
314 				    struct tls_context *ctx,
315 				    struct tls_record_info *record,
316 				    struct page_frag *pfrag,
317 				    unsigned char record_type)
318 {
319 	struct tls_prot_info *prot = &ctx->prot_info;
320 	struct page_frag dummy_tag_frag;
321 
322 	/* append tag
323 	 * device will fill in the tag, we just need to append a placeholder
324 	 * use socket memory so the tag can coalesce with the previous frag
325 	 * (a dedicated, re-used tag buffer would increase the frag count)
326 	 * if we can't allocate memory now use the dummy page
327 	 */
328 	if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
329 	    !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
330 		dummy_tag_frag.page = dummy_page;
331 		dummy_tag_frag.offset = 0;
332 		pfrag = &dummy_tag_frag;
333 	}
334 	tls_append_frag(record, pfrag, prot->tag_size);
335 
336 	/* fill prepend */
337 	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
338 			 record->len - prot->overhead_size,
339 			 record_type);
340 }
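
/* After close the record layout is complete: frags[0] starts with the TLS
 * header written by tls_fill_prepend(), the payload fragments follow, and
 * the placeholder tag appended above comes last; the device replaces the
 * placeholder with the real tag when it encrypts the record.
 */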
341 
342 static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
343 				 struct page_frag *pfrag,
344 				 size_t prepend_size)
345 {
346 	struct tls_record_info *record;
347 	skb_frag_t *frag;
348 
349 	record = kmalloc(sizeof(*record), GFP_KERNEL);
350 	if (!record)
351 		return -ENOMEM;
352 
353 	frag = &record->frags[0];
354 	skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
355 				prepend_size);
356 
357 	get_page(pfrag->page);
358 	pfrag->offset += prepend_size;
359 
360 	record->num_frags = 1;
361 	record->len = prepend_size;
362 	offload_ctx->open_record = record;
363 	return 0;
364 }
365 
366 static int tls_do_allocation(struct sock *sk,
367 			     struct tls_offload_context_tx *offload_ctx,
368 			     struct page_frag *pfrag,
369 			     size_t prepend_size)
370 {
371 	int ret;
372 
373 	if (!offload_ctx->open_record) {
374 		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
375 						   sk->sk_allocation))) {
376 			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
377 			sk_stream_moderate_sndbuf(sk);
378 			return -ENOMEM;
379 		}
380 
381 		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
382 		if (ret)
383 			return ret;
384 
385 		if (pfrag->size > pfrag->offset)
386 			return 0;
387 	}
388 
389 	if (!sk_page_frag_refill(sk, pfrag))
390 		return -ENOMEM;
391 
392 	return 0;
393 }
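
/* Allocation is done in two steps: when a record is being opened, the
 * page fragment is first refilled just enough for the TLS header, which
 * must sit in a single contiguous fragment; sk_page_frag_refill() then
 * tops the fragment up for payload data and handles socket memory
 * pressure on failure.
 */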
394 
395 static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
396 {
397 	size_t pre_copy, nocache;
398 
399 	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
400 	if (pre_copy) {
401 		pre_copy = min(pre_copy, bytes);
402 		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
403 			return -EFAULT;
404 		bytes -= pre_copy;
405 		addr += pre_copy;
406 	}
407 
408 	nocache = round_down(bytes, SMP_CACHE_BYTES);
409 	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
410 		return -EFAULT;
411 	bytes -= nocache;
412 	addr += nocache;
413 
414 	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
415 		return -EFAULT;
416 
417 	return 0;
418 }
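
/* The bulk of the payload is copied with copy_from_iter_nocache() on
 * cache-line-aligned chunks: the plaintext is normally read next by the
 * device doing the encryption, so there is presumably little value in
 * pulling it through the CPU caches. Unaligned head and tail bytes fall
 * back to a regular copy.
 */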
419 
420 static int tls_push_data(struct sock *sk,
421 			 struct iov_iter *iter,
422 			 size_t size, int flags,
423 			 unsigned char record_type)
424 {
425 	struct tls_context *tls_ctx = tls_get_ctx(sk);
426 	struct tls_prot_info *prot = &tls_ctx->prot_info;
427 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
428 	struct tls_record_info *record;
429 	int tls_push_record_flags;
430 	struct page_frag *pfrag;
431 	size_t orig_size = size;
432 	u32 max_open_record_len;
433 	bool more = false;
434 	bool done = false;
435 	int copy, rc = 0;
436 	long timeo;
437 
438 	if (flags &
439 	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
440 	      MSG_SPLICE_PAGES | MSG_EOR))
441 		return -EOPNOTSUPP;
442 
443 	if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR))
444 		return -EINVAL;
445 
446 	if (unlikely(sk->sk_err))
447 		return -sk->sk_err;
448 
449 	flags |= MSG_SENDPAGE_DECRYPTED;
450 	tls_push_record_flags = flags | MSG_MORE;
451 
452 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
453 	if (tls_is_partially_sent_record(tls_ctx)) {
454 		rc = tls_push_partial_record(sk, tls_ctx, flags);
455 		if (rc < 0)
456 			return rc;
457 	}
458 
459 	pfrag = sk_page_frag(sk);
460 
461 	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
462 	 * we need to leave room for an authentication tag.
463 	 */
464 	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
465 			      prot->prepend_size;
466 	do {
467 		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
468 		if (unlikely(rc)) {
469 			rc = sk_stream_wait_memory(sk, &timeo);
470 			if (!rc)
471 				continue;
472 
473 			record = ctx->open_record;
474 			if (!record)
475 				break;
476 handle_error:
477 			if (record_type != TLS_RECORD_TYPE_DATA) {
478 				/* avoid sending partial
479 				 * record with type !=
480 				 * application_data
481 				 */
482 				size = orig_size;
483 				destroy_record(record);
484 				ctx->open_record = NULL;
485 			} else if (record->len > prot->prepend_size) {
486 				goto last_record;
487 			}
488 
489 			break;
490 		}
491 
492 		record = ctx->open_record;
493 
494 		copy = min_t(size_t, size, max_open_record_len - record->len);
495 		if (copy && (flags & MSG_SPLICE_PAGES)) {
496 			struct page_frag zc_pfrag;
497 			struct page **pages = &zc_pfrag.page;
498 			size_t off;
499 
500 			rc = iov_iter_extract_pages(iter, &pages,
501 						    copy, 1, 0, &off);
502 			if (rc <= 0) {
503 				if (rc == 0)
504 					rc = -EIO;
505 				goto handle_error;
506 			}
507 			copy = rc;
508 
509 			if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
510 				iov_iter_revert(iter, copy);
511 				rc = -EIO;
512 				goto handle_error;
513 			}
514 
515 			zc_pfrag.offset = off;
516 			zc_pfrag.size = copy;
517 			tls_append_frag(record, &zc_pfrag, copy);
518 		} else if (copy) {
519 			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
520 
521 			rc = tls_device_copy_data(page_address(pfrag->page) +
522 						  pfrag->offset, copy,
523 						  iter);
524 			if (rc)
525 				goto handle_error;
526 			tls_append_frag(record, pfrag, copy);
527 		}
528 
529 		size -= copy;
530 		if (!size) {
531 last_record:
532 			tls_push_record_flags = flags;
533 			if (flags & MSG_MORE) {
534 				more = true;
535 				break;
536 			}
537 
538 			done = true;
539 		}
540 
541 		if (done || record->len >= max_open_record_len ||
542 		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
543 			tls_device_record_close(sk, tls_ctx, record,
544 						pfrag, record_type);
545 
546 			rc = tls_push_record(sk,
547 					     tls_ctx,
548 					     ctx,
549 					     record,
550 					     tls_push_record_flags);
551 			if (rc < 0)
552 				break;
553 		}
554 	} while (!done);
555 
556 	tls_ctx->pending_open_record_frags = more;
557 
558 	if (orig_size - size > 0)
559 		rc = orig_size - size;
560 
561 	return rc;
562 }
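
/* Return convention of tls_push_data(): if any bytes were consumed from
 * @iter, that count is returned even when an error interrupted the loop;
 * with no progress the last error (or 0, e.g. for a @size == 0 call that
 * only flushes a pending record) is returned.
 */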
563 
564 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
565 {
566 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
567 	struct tls_context *tls_ctx = tls_get_ctx(sk);
568 	int rc;
569 
570 	if (!tls_ctx->zerocopy_sendfile)
571 		msg->msg_flags &= ~MSG_SPLICE_PAGES;
572 
573 	mutex_lock(&tls_ctx->tx_lock);
574 	lock_sock(sk);
575 
576 	if (unlikely(msg->msg_controllen)) {
577 		rc = tls_process_cmsg(sk, msg, &record_type);
578 		if (rc)
579 			goto out;
580 	}
581 
582 	rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
583 			   record_type);
584 
585 out:
586 	release_sock(sk);
587 	mutex_unlock(&tls_ctx->tx_lock);
588 	return rc;
589 }
590 
591 void tls_device_splice_eof(struct socket *sock)
592 {
593 	struct sock *sk = sock->sk;
594 	struct tls_context *tls_ctx = tls_get_ctx(sk);
595 	struct iov_iter iter = {};
596 
597 	if (!tls_is_partially_sent_record(tls_ctx))
598 		return;
599 
600 	mutex_lock(&tls_ctx->tx_lock);
601 	lock_sock(sk);
602 
603 	if (tls_is_partially_sent_record(tls_ctx)) {
604 		iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
605 		tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
606 	}
607 
608 	release_sock(sk);
609 	mutex_unlock(&tls_ctx->tx_lock);
610 }
611 
612 struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
613 				       u32 seq, u64 *p_record_sn)
614 {
615 	u64 record_sn = context->hint_record_sn;
616 	struct tls_record_info *info, *last;
617 
618 	info = context->retransmit_hint;
619 	if (!info ||
620 	    before(seq, info->end_seq - info->len)) {
621 		/* if retransmit_hint is irrelevant start
622 		 * from the beginning of the list
623 		 */
624 		info = list_first_entry_or_null(&context->records_list,
625 						struct tls_record_info, list);
626 		if (!info)
627 			return NULL;
628 		/* send the start_marker record if seq number is before the
629 		 * tls offload start marker sequence number. This record is
630 		 * required to handle TCP packets which are before TLS offload
631 		 * started.
632 		 * If it's not the start marker, check whether this seq number
633 		 * belongs to the list.
634 		 */
635 		if (likely(!tls_record_is_start_marker(info))) {
636 			/* we have the first record, get the last record to see
637 			 * if this seq number belongs to the list.
638 			 */
639 			last = list_last_entry(&context->records_list,
640 					       struct tls_record_info, list);
641 
642 			if (!between(seq, tls_record_start_seq(info),
643 				     last->end_seq))
644 				return NULL;
645 		}
646 		record_sn = context->unacked_record_sn;
647 	}
648 
649 	/* We just need the _rcu for the READ_ONCE() */
650 	rcu_read_lock();
651 	list_for_each_entry_from_rcu(info, &context->records_list, list) {
652 		if (before(seq, info->end_seq)) {
653 			if (!context->retransmit_hint ||
654 			    after(info->end_seq,
655 				  context->retransmit_hint->end_seq)) {
656 				context->hint_record_sn = record_sn;
657 				context->retransmit_hint = info;
658 			}
659 			*p_record_sn = record_sn;
660 			goto exit_rcu_unlock;
661 		}
662 		record_sn++;
663 	}
664 	info = NULL;
665 
666 exit_rcu_unlock:
667 	rcu_read_unlock();
668 	return info;
669 }
670 EXPORT_SYMBOL(tls_get_record);
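
/* tls_get_record() is the lookup used on the retransmission path (by the
 * software fallback and by drivers): given a TCP sequence number it
 * returns the offload record covering it together with that record's
 * sequence number, so retransmitted bytes can be encrypted with the
 * correct per-record state.
 */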
671 
672 static int tls_device_push_pending_record(struct sock *sk, int flags)
673 {
674 	struct iov_iter iter;
675 
676 	iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
677 	return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
678 }
679 
680 void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
681 {
682 	if (tls_is_partially_sent_record(ctx)) {
683 		gfp_t sk_allocation = sk->sk_allocation;
684 
685 		WARN_ON_ONCE(sk->sk_write_pending);
686 
687 		sk->sk_allocation = GFP_ATOMIC;
688 		tls_push_partial_record(sk, ctx,
689 					MSG_DONTWAIT | MSG_NOSIGNAL |
690 					MSG_SENDPAGE_DECRYPTED);
691 		sk->sk_allocation = sk_allocation;
692 	}
693 }
694 
695 static void tls_device_resync_rx(struct tls_context *tls_ctx,
696 				 struct sock *sk, u32 seq, u8 *rcd_sn)
697 {
698 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
699 	struct net_device *netdev;
700 
701 	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
702 	rcu_read_lock();
703 	netdev = rcu_dereference(tls_ctx->netdev);
704 	if (netdev)
705 		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
706 						   TLS_OFFLOAD_CTX_DIR_RX);
707 	rcu_read_unlock();
708 	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
709 }
710 
711 static bool
712 tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
713 			   s64 resync_req, u32 *seq, u16 *rcd_delta)
714 {
715 	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
716 	u32 req_seq = resync_req >> 32;
717 	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
718 	u16 i;
719 
720 	*rcd_delta = 0;
721 
722 	if (is_async) {
723 		/* shouldn't get to wraparound:
724 		 * too long in async stage, something bad happened
725 		 */
726 		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
727 			return false;
728 
729 		/* asynchronous stage: log all headers seq such that
730 		 * req_seq <= seq <= end_seq, and wait for real resync request
731 		 */
732 		if (before(*seq, req_seq))
733 			return false;
734 		if (!after(*seq, req_end) &&
735 		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
736 			resync_async->log[resync_async->loglen++] = *seq;
737 
738 		resync_async->rcd_delta++;
739 
740 		return false;
741 	}
742 
743 	/* synchronous stage: check against the logged entries and
744 	 * proceed to check the next entries if no match was found
745 	 */
746 	for (i = 0; i < resync_async->loglen; i++)
747 		if (req_seq == resync_async->log[i] &&
748 		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
749 			*rcd_delta = resync_async->rcd_delta - i;
750 			*seq = req_seq;
751 			resync_async->loglen = 0;
752 			resync_async->rcd_delta = 0;
753 			return true;
754 		}
755 
756 	resync_async->loglen = 0;
757 	resync_async->rcd_delta = 0;
758 
759 	if (req_seq == *seq &&
760 	    atomic64_try_cmpxchg(&resync_async->req,
761 				 &resync_req, 0))
762 		return true;
763 
764 	return false;
765 }
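
/* Asynchronous RX resync: while the driver's request has RESYNC_REQ_ASYNC
 * set, this only logs record-header sequence numbers that fall inside the
 * requested window and counts skipped records in rcd_delta. Once the
 * driver posts the real (synchronous) request, a matching logged entry
 * resets the log and reports how far the record sequence number must be
 * walked back (the caller then uses tls_bigint_subtract()).
 */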
766 
767 void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
768 {
769 	struct tls_context *tls_ctx = tls_get_ctx(sk);
770 	struct tls_offload_context_rx *rx_ctx;
771 	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
772 	u32 sock_data, is_req_pending;
773 	struct tls_prot_info *prot;
774 	s64 resync_req;
775 	u16 rcd_delta;
776 	u32 req_seq;
777 
778 	if (tls_ctx->rx_conf != TLS_HW)
779 		return;
780 	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
781 		return;
782 
783 	prot = &tls_ctx->prot_info;
784 	rx_ctx = tls_offload_ctx_rx(tls_ctx);
785 	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
786 
787 	switch (rx_ctx->resync_type) {
788 	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
789 		resync_req = atomic64_read(&rx_ctx->resync_req);
790 		req_seq = resync_req >> 32;
791 		seq += TLS_HEADER_SIZE - 1;
792 		is_req_pending = resync_req;
793 
794 		if (likely(!is_req_pending) || req_seq != seq ||
795 		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
796 			return;
797 		break;
798 	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
799 		if (likely(!rx_ctx->resync_nh_do_now))
800 			return;
801 
802 		/* head of next rec is already in, note that the sock_inq will
803 		 * include the currently parsed message when called from parser
804 		 */
805 		sock_data = tcp_inq(sk);
806 		if (sock_data > rcd_len) {
807 			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
808 							    rcd_len);
809 			return;
810 		}
811 
812 		rx_ctx->resync_nh_do_now = 0;
813 		seq += rcd_len;
814 		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
815 		break;
816 	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
817 		resync_req = atomic64_read(&rx_ctx->resync_async->req);
818 		is_req_pending = resync_req;
819 		if (likely(!is_req_pending))
820 			return;
821 
822 		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
823 						resync_req, &seq, &rcd_delta))
824 			return;
825 		tls_bigint_subtract(rcd_sn, rcd_delta);
826 		break;
827 	}
828 
829 	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
830 }
831 
832 static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
833 					   struct tls_offload_context_rx *ctx,
834 					   struct sock *sk, struct sk_buff *skb)
835 {
836 	struct strp_msg *rxm;
837 
838 	/* device will request resyncs by itself based on stream scan */
839 	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
840 		return;
841 	/* already scheduled */
842 	if (ctx->resync_nh_do_now)
843 		return;
844 	/* seen decrypted fragments since last fully-failed record */
845 	if (ctx->resync_nh_reset) {
846 		ctx->resync_nh_reset = 0;
847 		ctx->resync_nh.decrypted_failed = 1;
848 		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
849 		return;
850 	}
851 
852 	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
853 		return;
854 
855 	/* doing resync, bump the next target in case it fails */
856 	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
857 		ctx->resync_nh.decrypted_tgt *= 2;
858 	else
859 		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
860 
861 	rxm = strp_msg(skb);
862 
863 	/* head of next rec is already in, parser will sync for us */
864 	if (tcp_inq(sk) > rxm->full_len) {
865 		trace_tls_device_rx_resync_nh_schedule(sk);
866 		ctx->resync_nh_do_now = 1;
867 	} else {
868 		struct tls_prot_info *prot = &tls_ctx->prot_info;
869 		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
870 
871 		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
872 		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
873 
874 		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
875 				     rcd_sn);
876 	}
877 }
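
/* Core-driven ("next hint") RX resync: after a run of records the device
 * failed to decrypt, either the strparser is asked to flag the next record
 * header (when more data is already queued) or a resync hint for the next
 * record is sent to the driver immediately. The decrypted_tgt threshold
 * grows after every attempt so a stream that keeps failing does not
 * trigger a resync request per record.
 */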
878 
879 static int
880 tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
881 {
882 	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
883 	const struct tls_cipher_desc *cipher_desc;
884 	int err, offset, copy, data_len, pos;
885 	struct sk_buff *skb, *skb_iter;
886 	struct scatterlist sg[1];
887 	struct strp_msg *rxm;
888 	char *orig_buf, *buf;
889 
890 	cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
891 	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
892 
893 	rxm = strp_msg(tls_strp_msg(sw_ctx));
894 	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
895 			   sk->sk_allocation);
896 	if (!orig_buf)
897 		return -ENOMEM;
898 	buf = orig_buf;
899 
900 	err = tls_strp_msg_cow(sw_ctx);
901 	if (unlikely(err))
902 		goto free_buf;
903 
904 	skb = tls_strp_msg(sw_ctx);
905 	rxm = strp_msg(skb);
906 	offset = rxm->offset;
907 
908 	sg_init_table(sg, 1);
909 	sg_set_buf(&sg[0], buf,
910 		   rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
911 	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
912 	if (err)
913 		goto free_buf;
914 
915 	/* We are interested only in the decrypted data not the auth */
916 	err = decrypt_skb(sk, sg);
917 	if (err != -EBADMSG)
918 		goto free_buf;
919 	else
920 		err = 0;
921 
922 	data_len = rxm->full_len - cipher_desc->tag;
923 
924 	if (skb_pagelen(skb) > offset) {
925 		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
926 
927 		if (skb->decrypted) {
928 			err = skb_store_bits(skb, offset, buf, copy);
929 			if (err)
930 				goto free_buf;
931 		}
932 
933 		offset += copy;
934 		buf += copy;
935 	}
936 
937 	pos = skb_pagelen(skb);
938 	skb_walk_frags(skb, skb_iter) {
939 		int frag_pos;
940 
941 		/* Practically all frags must belong to msg if reencrypt
942 		 * is needed with current strparser and coalescing logic,
943 		 * but strparser may "get optimized", so let's be safe.
944 		 */
945 		if (pos + skb_iter->len <= offset)
946 			goto done_with_frag;
947 		if (pos >= data_len + rxm->offset)
948 			break;
949 
950 		frag_pos = offset - pos;
951 		copy = min_t(int, skb_iter->len - frag_pos,
952 			     data_len + rxm->offset - offset);
953 
954 		if (skb_iter->decrypted) {
955 			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
956 			if (err)
957 				goto free_buf;
958 		}
959 
960 		offset += copy;
961 		buf += copy;
962 done_with_frag:
963 		pos += skb_iter->len;
964 	}
965 
966 free_buf:
967 	kfree(orig_buf);
968 	return err;
969 }
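
/* "Reencrypt" is meant literally: for the offloadable AES-GCM ciphers,
 * decryption is a keystream XOR, so running decrypt_skb() over the mixed
 * plaintext/ciphertext record turns the regions the device had already
 * decrypted back into ciphertext in the bounce buffer. Only those regions
 * (fragments still marked ->decrypted) are copied back, so the record
 * becomes uniformly encrypted and the normal software decryption path can
 * handle it. The -EBADMSG from the inevitably failing tag check is
 * expected and treated as success.
 */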
970 
971 int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
972 {
973 	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
974 	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
975 	struct sk_buff *skb = tls_strp_msg(sw_ctx);
976 	struct strp_msg *rxm = strp_msg(skb);
977 	int is_decrypted, is_encrypted;
978 
979 	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
980 		is_decrypted = skb->decrypted;
981 		is_encrypted = !is_decrypted;
982 	} else {
983 		is_decrypted = 0;
984 		is_encrypted = 0;
985 	}
986 
987 	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
988 				   tls_ctx->rx.rec_seq, rxm->full_len,
989 				   is_encrypted, is_decrypted);
990 
991 	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
992 		if (likely(is_encrypted || is_decrypted))
993 			return is_decrypted;
994 
995 		/* After tls_device_down disables the offload, the next SKB will
996 		 * likely have initial fragments decrypted, and final ones not
997 		 * decrypted. We need to reencrypt that single SKB.
998 		 */
999 		return tls_device_reencrypt(sk, tls_ctx);
1000 	}
1001 
1002 	/* Return immediately if the record is either entirely plaintext or
1003 	 * entirely ciphertext. Otherwise reencrypt the partially decrypted
1004 	 * record.
1005 	 */
1006 	if (is_decrypted) {
1007 		ctx->resync_nh_reset = 1;
1008 		return is_decrypted;
1009 	}
1010 	if (is_encrypted) {
1011 		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
1012 		return 0;
1013 	}
1014 
1015 	ctx->resync_nh_reset = 1;
1016 	return tls_device_reencrypt(sk, tls_ctx);
1017 }
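
/* Return contract towards the tls_sw RX path: a positive value means the
 * device already decrypted the record and software decryption is skipped,
 * 0 means the record must still be decrypted in software, and a negative
 * value is an error. Mixed records are normalized by tls_device_reencrypt()
 * so they always fall into the "decrypt in software" case.
 */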
1018 
1019 static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
1020 			      struct net_device *netdev)
1021 {
1022 	if (sk->sk_destruct != tls_device_sk_destruct) {
1023 		refcount_set(&ctx->refcount, 1);
1024 		dev_hold(netdev);
1025 		RCU_INIT_POINTER(ctx->netdev, netdev);
1026 		spin_lock_irq(&tls_device_lock);
1027 		list_add_tail(&ctx->list, &tls_device_list);
1028 		spin_unlock_irq(&tls_device_lock);
1029 
1030 		ctx->sk_destruct = sk->sk_destruct;
1031 		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
1032 	}
1033 }
1034 
1035 static struct tls_offload_context_tx *alloc_offload_ctx_tx(struct tls_context *ctx)
1036 {
1037 	struct tls_offload_context_tx *offload_ctx;
1038 	__be64 rcd_sn;
1039 
1040 	offload_ctx = kzalloc(sizeof(*offload_ctx), GFP_KERNEL);
1041 	if (!offload_ctx)
1042 		return NULL;
1043 
1044 	INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
1045 	INIT_LIST_HEAD(&offload_ctx->records_list);
1046 	spin_lock_init(&offload_ctx->lock);
1047 	sg_init_table(offload_ctx->sg_tx_data,
1048 		      ARRAY_SIZE(offload_ctx->sg_tx_data));
1049 
1050 	/* start at rec_seq - 1 to account for the start marker record */
1051 	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
1052 	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;
1053 
1054 	offload_ctx->ctx = ctx;
1055 
1056 	return offload_ctx;
1057 }
1058 
1059 int tls_set_device_offload(struct sock *sk)
1060 {
1061 	struct tls_record_info *start_marker_record;
1062 	struct tls_offload_context_tx *offload_ctx;
1063 	const struct tls_cipher_desc *cipher_desc;
1064 	struct tls_crypto_info *crypto_info;
1065 	struct tls_prot_info *prot;
1066 	struct net_device *netdev;
1067 	struct tls_context *ctx;
1068 	char *iv, *rec_seq;
1069 	int rc;
1070 
1071 	ctx = tls_get_ctx(sk);
1072 	prot = &ctx->prot_info;
1073 
1074 	if (ctx->priv_ctx_tx)
1075 		return -EEXIST;
1076 
1077 	netdev = get_netdev_for_sock(sk);
1078 	if (!netdev) {
1079 		pr_err_ratelimited("%s: netdev not found\n", __func__);
1080 		return -EINVAL;
1081 	}
1082 
1083 	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
1084 		rc = -EOPNOTSUPP;
1085 		goto release_netdev;
1086 	}
1087 
1088 	crypto_info = &ctx->crypto_send.info;
1089 	if (crypto_info->version != TLS_1_2_VERSION) {
1090 		rc = -EOPNOTSUPP;
1091 		goto release_netdev;
1092 	}
1093 
1094 	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
1095 	if (!cipher_desc || !cipher_desc->offloadable) {
1096 		rc = -EINVAL;
1097 		goto release_netdev;
1098 	}
1099 
1100 	rc = init_prot_info(prot, crypto_info, cipher_desc);
1101 	if (rc)
1102 		goto release_netdev;
1103 
1104 	iv = crypto_info_iv(crypto_info, cipher_desc);
1105 	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
1106 
1107 	memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
1108 	memcpy(ctx->tx.rec_seq, rec_seq, cipher_desc->rec_seq);
1109 
1110 	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
1111 	if (!start_marker_record) {
1112 		rc = -ENOMEM;
1113 		goto release_netdev;
1114 	}
1115 
1116 	offload_ctx = alloc_offload_ctx_tx(ctx);
1117 	if (!offload_ctx) {
1118 		rc = -ENOMEM;
1119 		goto free_marker_record;
1120 	}
1121 
1122 	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
1123 	if (rc)
1124 		goto free_offload_ctx;
1125 
1126 	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
1127 	start_marker_record->len = 0;
1128 	start_marker_record->num_frags = 0;
1129 	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
1130 
1131 	clean_acked_data_enable(tcp_sk(sk), &tls_tcp_clean_acked);
1132 	ctx->push_pending_record = tls_device_push_pending_record;
1133 
1134 	/* TLS offload is greatly simplified if we don't send
1135 	 * SKBs where only part of the payload needs to be encrypted.
1136 	 * So mark the last skb in the write queue as end of record.
1137 	 */
1138 	tcp_write_collapse_fence(sk);
1139 
1140 	/* Avoid offloading if the device is down
1141 	 * We don't want to offload new flows after
1142 	 * the NETDEV_DOWN event
1143 	 *
1144 	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
1145 	 * handler thus protecting from the device going down before
1146 	 * ctx was added to tls_device_list.
1147 	 */
1148 	down_read(&device_offload_lock);
1149 	if (!(netdev->flags & IFF_UP)) {
1150 		rc = -EINVAL;
1151 		goto release_lock;
1152 	}
1153 
1154 	ctx->priv_ctx_tx = offload_ctx;
1155 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
1156 					     &ctx->crypto_send.info,
1157 					     tcp_sk(sk)->write_seq);
1158 	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
1159 				     tcp_sk(sk)->write_seq, rec_seq, rc);
1160 	if (rc)
1161 		goto release_lock;
1162 
1163 	tls_device_attach(ctx, sk, netdev);
1164 	up_read(&device_offload_lock);
1165 
1166 	/* following this assignment tls_is_skb_tx_device_offloaded
1167 	 * will return true and the context might be accessed
1168 	 * by the netdev's xmit function.
1169 	 */
1170 	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
1171 	dev_put(netdev);
1172 
1173 	return 0;
1174 
1175 release_lock:
1176 	up_read(&device_offload_lock);
1177 	clean_acked_data_disable(tcp_sk(sk));
1178 	crypto_free_aead(offload_ctx->aead_send);
1179 free_offload_ctx:
1180 	kfree(offload_ctx);
1181 	ctx->priv_ctx_tx = NULL;
1182 free_marker_record:
1183 	kfree(start_marker_record);
1184 release_netdev:
1185 	dev_put(netdev);
1186 	return rc;
1187 }
1188 
1189 int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
1190 {
1191 	struct tls12_crypto_info_aes_gcm_128 *info;
1192 	struct tls_offload_context_rx *context;
1193 	struct net_device *netdev;
1194 	int rc = 0;
1195 
1196 	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
1197 		return -EOPNOTSUPP;
1198 
1199 	netdev = get_netdev_for_sock(sk);
1200 	if (!netdev) {
1201 		pr_err_ratelimited("%s: netdev not found\n", __func__);
1202 		return -EINVAL;
1203 	}
1204 
1205 	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
1206 		rc = -EOPNOTSUPP;
1207 		goto release_netdev;
1208 	}
1209 
1210 	/* Avoid offloading if the device is down
1211 	 * We don't want to offload new flows after
1212 	 * the NETDEV_DOWN event
1213 	 *
1214 	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
1215 	 * handler thus protecting from the device going down before
1216 	 * ctx was added to tls_device_list.
1217 	 */
1218 	down_read(&device_offload_lock);
1219 	if (!(netdev->flags & IFF_UP)) {
1220 		rc = -EINVAL;
1221 		goto release_lock;
1222 	}
1223 
1224 	context = kzalloc(sizeof(*context), GFP_KERNEL);
1225 	if (!context) {
1226 		rc = -ENOMEM;
1227 		goto release_lock;
1228 	}
1229 	context->resync_nh_reset = 1;
1230 
1231 	ctx->priv_ctx_rx = context;
1232 	rc = tls_set_sw_offload(sk, 0, NULL);
1233 	if (rc)
1234 		goto release_ctx;
1235 
1236 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
1237 					     &ctx->crypto_recv.info,
1238 					     tcp_sk(sk)->copied_seq);
1239 	info = (void *)&ctx->crypto_recv.info;
1240 	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
1241 				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
1242 	if (rc)
1243 		goto free_sw_resources;
1244 
1245 	tls_device_attach(ctx, sk, netdev);
1246 	up_read(&device_offload_lock);
1247 
1248 	dev_put(netdev);
1249 
1250 	return 0;
1251 
1252 free_sw_resources:
1253 	up_read(&device_offload_lock);
1254 	tls_sw_free_resources_rx(sk);
1255 	down_read(&device_offload_lock);
1256 release_ctx:
1257 	ctx->priv_ctx_rx = NULL;
1258 release_lock:
1259 	up_read(&device_offload_lock);
1260 release_netdev:
1261 	dev_put(netdev);
1262 	return rc;
1263 }
1264 
1265 void tls_device_offload_cleanup_rx(struct sock *sk)
1266 {
1267 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1268 	struct net_device *netdev;
1269 
1270 	down_read(&device_offload_lock);
1271 	netdev = rcu_dereference_protected(tls_ctx->netdev,
1272 					   lockdep_is_held(&device_offload_lock));
1273 	if (!netdev)
1274 		goto out;
1275 
1276 	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
1277 					TLS_OFFLOAD_CTX_DIR_RX);
1278 
1279 	if (tls_ctx->tx_conf != TLS_HW) {
1280 		dev_put(netdev);
1281 		rcu_assign_pointer(tls_ctx->netdev, NULL);
1282 	} else {
1283 		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
1284 	}
1285 out:
1286 	up_read(&device_offload_lock);
1287 	tls_sw_release_resources_rx(sk);
1288 }
1289 
1290 static int tls_device_down(struct net_device *netdev)
1291 {
1292 	struct tls_context *ctx, *tmp;
1293 	unsigned long flags;
1294 	LIST_HEAD(list);
1295 
1296 	/* Request a write lock to block new offload attempts */
1297 	down_write(&device_offload_lock);
1298 
1299 	spin_lock_irqsave(&tls_device_lock, flags);
1300 	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
1301 		struct net_device *ctx_netdev =
1302 			rcu_dereference_protected(ctx->netdev,
1303 						  lockdep_is_held(&device_offload_lock));
1304 
1305 		if (ctx_netdev != netdev ||
1306 		    !refcount_inc_not_zero(&ctx->refcount))
1307 			continue;
1308 
1309 		list_move(&ctx->list, &list);
1310 	}
1311 	spin_unlock_irqrestore(&tls_device_lock, flags);
1312 
1313 	list_for_each_entry_safe(ctx, tmp, &list, list) {
1314 		/* Stop offloaded TX and switch to the fallback.
1315 		 * tls_is_skb_tx_device_offloaded will return false.
1316 		 */
1317 		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
1318 
1319 		/* Stop the RX and TX resync.
1320 		 * tls_dev_resync must not be called after tls_dev_del.
1321 		 */
1322 		rcu_assign_pointer(ctx->netdev, NULL);
1323 
1324 		/* Start skipping the RX resync logic completely. */
1325 		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
1326 
1327 		/* Sync with inflight packets. After this point:
1328 		 * TX: no non-encrypted packets will be passed to the driver.
1329 		 * RX: resync requests from the driver will be ignored.
1330 		 */
1331 		synchronize_net();
1332 
1333 		/* Release the offload context on the driver side. */
1334 		if (ctx->tx_conf == TLS_HW)
1335 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1336 							TLS_OFFLOAD_CTX_DIR_TX);
1337 		if (ctx->rx_conf == TLS_HW &&
1338 		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
1339 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1340 							TLS_OFFLOAD_CTX_DIR_RX);
1341 
1342 		dev_put(netdev);
1343 
1344 		/* Move the context to a separate list for two reasons:
1345 		 * 1. When the context is deallocated, list_del is called.
1346 		 * 2. It's no longer an offloaded context, so we don't want to
1347 		 *    run offload-specific code on this context.
1348 		 */
1349 		spin_lock_irqsave(&tls_device_lock, flags);
1350 		list_move_tail(&ctx->list, &tls_device_down_list);
1351 		spin_unlock_irqrestore(&tls_device_lock, flags);
1352 
1353 		/* Device contexts for RX and TX will be freed on sk_destruct
1354 		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
1355 		 * Now release the ref taken above.
1356 		 */
1357 		if (refcount_dec_and_test(&ctx->refcount)) {
1358 			/* sk_destruct ran after tls_device_down took a ref, and
1359 			 * it returned early. Complete the destruction here.
1360 			 */
1361 			list_del(&ctx->list);
1362 			tls_device_free_ctx(ctx);
1363 		}
1364 	}
1365 
1366 	up_write(&device_offload_lock);
1367 
1368 	flush_workqueue(destruct_wq);
1369 
1370 	return NOTIFY_DONE;
1371 }
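
/* NETDEV_DOWN: every context bound to the departing device is switched to
 * the software fallback (tls_validate_xmit_skb_sw for TX, TLS_RX_DEV_DEGRADED
 * for RX), the driver-side state is released, and the context is parked on
 * tls_device_down_list so offload-specific code stops running on it. The
 * final flush_workqueue() ensures pending TX destruct work completes before
 * the notifier returns.
 */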
1372 
1373 static int tls_dev_event(struct notifier_block *this, unsigned long event,
1374 			 void *ptr)
1375 {
1376 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1377 
1378 	if (!dev->tlsdev_ops &&
1379 	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
1380 		return NOTIFY_DONE;
1381 
1382 	switch (event) {
1383 	case NETDEV_REGISTER:
1384 	case NETDEV_FEAT_CHANGE:
1385 		if (netif_is_bond_master(dev))
1386 			return NOTIFY_DONE;
1387 		if ((dev->features & NETIF_F_HW_TLS_RX) &&
1388 		    !dev->tlsdev_ops->tls_dev_resync)
1389 			return NOTIFY_BAD;
1390 
1391 		if (dev->tlsdev_ops &&
1392 		     dev->tlsdev_ops->tls_dev_add &&
1393 		     dev->tlsdev_ops->tls_dev_del)
1394 			return NOTIFY_DONE;
1395 		else
1396 			return NOTIFY_BAD;
1397 	case NETDEV_DOWN:
1398 		return tls_device_down(dev);
1399 	}
1400 	return NOTIFY_DONE;
1401 }
1402 
1403 static struct notifier_block tls_dev_notifier = {
1404 	.notifier_call	= tls_dev_event,
1405 };
1406 
1407 int __init tls_device_init(void)
1408 {
1409 	int err;
1410 
1411 	dummy_page = alloc_page(GFP_KERNEL);
1412 	if (!dummy_page)
1413 		return -ENOMEM;
1414 
1415 	destruct_wq = alloc_workqueue("ktls_device_destruct", WQ_PERCPU, 0);
1416 	if (!destruct_wq) {
1417 		err = -ENOMEM;
1418 		goto err_free_dummy;
1419 	}
1420 
1421 	err = register_netdevice_notifier(&tls_dev_notifier);
1422 	if (err)
1423 		goto err_destroy_wq;
1424 
1425 	return 0;
1426 
1427 err_destroy_wq:
1428 	destroy_workqueue(destruct_wq);
1429 err_free_dummy:
1430 	put_page(dummy_page);
1431 	return err;
1432 }
1433 
1434 void __exit tls_device_cleanup(void)
1435 {
1436 	unregister_netdevice_notifier(&tls_dev_notifier);
1437 	destroy_workqueue(destruct_wq);
1438 	clean_acked_data_flush();
1439 	put_page(dummy_page);
1440 }
1441