xref: /linux/net/tls/tls_device.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1e8f69799SIlya Lesokhin /* Copyright (c) 2018, Mellanox Technologies All rights reserved.
2e8f69799SIlya Lesokhin  *
3e8f69799SIlya Lesokhin  * This software is available to you under a choice of one of two
4e8f69799SIlya Lesokhin  * licenses.  You may choose to be licensed under the terms of the GNU
5e8f69799SIlya Lesokhin  * General Public License (GPL) Version 2, available from the file
6e8f69799SIlya Lesokhin  * COPYING in the main directory of this source tree, or the
7e8f69799SIlya Lesokhin  * OpenIB.org BSD license below:
8e8f69799SIlya Lesokhin  *
9e8f69799SIlya Lesokhin  *     Redistribution and use in source and binary forms, with or
10e8f69799SIlya Lesokhin  *     without modification, are permitted provided that the following
11e8f69799SIlya Lesokhin  *     conditions are met:
12e8f69799SIlya Lesokhin  *
13e8f69799SIlya Lesokhin  *      - Redistributions of source code must retain the above
14e8f69799SIlya Lesokhin  *        copyright notice, this list of conditions and the following
15e8f69799SIlya Lesokhin  *        disclaimer.
16e8f69799SIlya Lesokhin  *
17e8f69799SIlya Lesokhin  *      - Redistributions in binary form must reproduce the above
18e8f69799SIlya Lesokhin  *        copyright notice, this list of conditions and the following
19e8f69799SIlya Lesokhin  *        disclaimer in the documentation and/or other materials
20e8f69799SIlya Lesokhin  *        provided with the distribution.
21e8f69799SIlya Lesokhin  *
22e8f69799SIlya Lesokhin  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23e8f69799SIlya Lesokhin  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24e8f69799SIlya Lesokhin  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25e8f69799SIlya Lesokhin  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26e8f69799SIlya Lesokhin  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27e8f69799SIlya Lesokhin  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28e8f69799SIlya Lesokhin  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29e8f69799SIlya Lesokhin  * SOFTWARE.
30e8f69799SIlya Lesokhin  */
31e8f69799SIlya Lesokhin 
32e8f69799SIlya Lesokhin #include <crypto/aead.h>
33e8f69799SIlya Lesokhin #include <linux/highmem.h>
34e8f69799SIlya Lesokhin #include <linux/module.h>
35e8f69799SIlya Lesokhin #include <linux/netdevice.h>
36e8f69799SIlya Lesokhin #include <net/dst.h>
37e8f69799SIlya Lesokhin #include <net/inet_connection_sock.h>
38e8f69799SIlya Lesokhin #include <net/tcp.h>
39e8f69799SIlya Lesokhin #include <net/tls.h>
40f6d827b1SMina Almasry #include <linux/skbuff_ref.h>
41e8f69799SIlya Lesokhin 
4258790314SJakub Kicinski #include "tls.h"
438538d29cSJakub Kicinski #include "trace.h"
448538d29cSJakub Kicinski 
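/* Drivers hook into TLS offload through struct tlsdev_ops (include/net/tls.h)
 * and the NETIF_F_HW_TLS_* features. A minimal registration sketch, with
 * hypothetical mydrv_* callbacks (not a real driver):
 *
 *	static const struct tlsdev_ops mydrv_tlsdev_ops = {
 *		.tls_dev_add	= mydrv_tls_add,    (program key/state for a flow)
 *		.tls_dev_del	= mydrv_tls_del,    (tear down offload state)
 *		.tls_dev_resync	= mydrv_tls_resync, (update rcd_sn at a TCP seq)
 *	};
 *
 *	netdev->tlsdev_ops = &mydrv_tlsdev_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */
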
45e8f69799SIlya Lesokhin /* device_offload_lock is used to synchronize tls_dev_add
46e8f69799SIlya Lesokhin  * against NETDEV_DOWN notifications.
47e8f69799SIlya Lesokhin  */
48e8f69799SIlya Lesokhin static DECLARE_RWSEM(device_offload_lock);
49e8f69799SIlya Lesokhin 
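/* tls_device_queue_ctx_destruction() runs under tls_device_lock (a
 * spinlock), so freeing a TX context defers the driver ->tls_dev_del()
 * call to this workqueue; see tls_device_tx_del_task().
 */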
507adc91e0STariq Toukan static struct workqueue_struct *destruct_wq __read_mostly;
51e8f69799SIlya Lesokhin 
52e8f69799SIlya Lesokhin static LIST_HEAD(tls_device_list);
53c55dcdd4SMaxim Mikityanskiy static LIST_HEAD(tls_device_down_list);
54e8f69799SIlya Lesokhin static DEFINE_SPINLOCK(tls_device_lock);
55e8f69799SIlya Lesokhin 
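/* Backs the placeholder auth-tag fragment appended in
 * tls_device_record_close() when socket memory cannot be refilled.
 */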
566b47808fSJakub Kicinski static struct page *dummy_page;
576b47808fSJakub Kicinski 
58e8f69799SIlya Lesokhin static void tls_device_free_ctx(struct tls_context *ctx)
59e8f69799SIlya Lesokhin {
601c1cb311SSabrina Dubroca 	if (ctx->tx_conf == TLS_HW)
61d80a1b9dSBoris Pismenny 		kfree(tls_offload_ctx_tx(ctx));
62e8f69799SIlya Lesokhin 
634799ac81SBoris Pismenny 	if (ctx->rx_conf == TLS_HW)
644799ac81SBoris Pismenny 		kfree(tls_offload_ctx_rx(ctx));
654799ac81SBoris Pismenny 
6615a7dea7SJakub Kicinski 	tls_ctx_free(NULL, ctx);
67e8f69799SIlya Lesokhin }
68e8f69799SIlya Lesokhin 
697adc91e0STariq Toukan static void tls_device_tx_del_task(struct work_struct *work)
70e8f69799SIlya Lesokhin {
717adc91e0STariq Toukan 	struct tls_offload_context_tx *offload_ctx =
727adc91e0STariq Toukan 		container_of(work, struct tls_offload_context_tx, destruct_work);
737adc91e0STariq Toukan 	struct tls_context *ctx = offload_ctx->ctx;
7494ce3b64SMaxim Mikityanskiy 	struct net_device *netdev;
7594ce3b64SMaxim Mikityanskiy 
7694ce3b64SMaxim Mikityanskiy 	/* Safe, because this is the destroy flow, refcount is 0, so
7794ce3b64SMaxim Mikityanskiy 	 * tls_device_down can't store this field in parallel.
7894ce3b64SMaxim Mikityanskiy 	 */
7994ce3b64SMaxim Mikityanskiy 	netdev = rcu_dereference_protected(ctx->netdev,
8094ce3b64SMaxim Mikityanskiy 					   !refcount_read(&ctx->refcount));
81e8f69799SIlya Lesokhin 
827adc91e0STariq Toukan 	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
83e8f69799SIlya Lesokhin 	dev_put(netdev);
844799ac81SBoris Pismenny 	ctx->netdev = NULL;
85e8f69799SIlya Lesokhin 	tls_device_free_ctx(ctx);
86e8f69799SIlya Lesokhin }
87e8f69799SIlya Lesokhin 
88e8f69799SIlya Lesokhin static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
89e8f69799SIlya Lesokhin {
9094ce3b64SMaxim Mikityanskiy 	struct net_device *netdev;
91e8f69799SIlya Lesokhin 	unsigned long flags;
92113671b2STariq Toukan 	bool async_cleanup;
93e8f69799SIlya Lesokhin 
94e8f69799SIlya Lesokhin 	spin_lock_irqsave(&tls_device_lock, flags);
95113671b2STariq Toukan 	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
96113671b2STariq Toukan 		spin_unlock_irqrestore(&tls_device_lock, flags);
97113671b2STariq Toukan 		return;
98113671b2STariq Toukan 	}
99f08d8c1bSTariq Toukan 
1007adc91e0STariq Toukan 	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
10194ce3b64SMaxim Mikityanskiy 
10294ce3b64SMaxim Mikityanskiy 	/* Safe, because this is the destroy flow, refcount is 0, so
10394ce3b64SMaxim Mikityanskiy 	 * tls_device_down can't store this field in parallel.
10494ce3b64SMaxim Mikityanskiy 	 */
10594ce3b64SMaxim Mikityanskiy 	netdev = rcu_dereference_protected(ctx->netdev,
10694ce3b64SMaxim Mikityanskiy 					   !refcount_read(&ctx->refcount));
10794ce3b64SMaxim Mikityanskiy 
10894ce3b64SMaxim Mikityanskiy 	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
109113671b2STariq Toukan 	if (async_cleanup) {
1107adc91e0STariq Toukan 		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
111e8f69799SIlya Lesokhin 
1127adc91e0STariq Toukan 		/* queue_work inside the spinlock
113e8f69799SIlya Lesokhin 		 * to make sure tls_device_down waits for that work.
114e8f69799SIlya Lesokhin 		 */
1157adc91e0STariq Toukan 		queue_work(destruct_wq, &offload_ctx->destruct_work);
116113671b2STariq Toukan 	}
117e8f69799SIlya Lesokhin 	spin_unlock_irqrestore(&tls_device_lock, flags);
118113671b2STariq Toukan 
119113671b2STariq Toukan 	if (!async_cleanup)
120113671b2STariq Toukan 		tls_device_free_ctx(ctx);
121e8f69799SIlya Lesokhin }
122e8f69799SIlya Lesokhin 
123e8f69799SIlya Lesokhin /* We assume that the socket is already connected */
124e8f69799SIlya Lesokhin static struct net_device *get_netdev_for_sock(struct sock *sk)
125e8f69799SIlya Lesokhin {
126e8f69799SIlya Lesokhin 	struct dst_entry *dst = sk_dst_get(sk);
127e8f69799SIlya Lesokhin 	struct net_device *netdev = NULL;
128e8f69799SIlya Lesokhin 
129e8f69799SIlya Lesokhin 	if (likely(dst)) {
130153cbd13STariq Toukan 		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
131e8f69799SIlya Lesokhin 		dev_hold(netdev);
132e8f69799SIlya Lesokhin 	}
133e8f69799SIlya Lesokhin 
134e8f69799SIlya Lesokhin 	dst_release(dst);
135e8f69799SIlya Lesokhin 
136e8f69799SIlya Lesokhin 	return netdev;
137e8f69799SIlya Lesokhin }
138e8f69799SIlya Lesokhin 
139e8f69799SIlya Lesokhin static void destroy_record(struct tls_record_info *record)
140e8f69799SIlya Lesokhin {
1417ccd4519SJakub Kicinski 	int i;
142e8f69799SIlya Lesokhin 
1437ccd4519SJakub Kicinski 	for (i = 0; i < record->num_frags; i++)
144c420c989SMatteo Croce 		__skb_frag_unref(&record->frags[i], false);
145e8f69799SIlya Lesokhin 	kfree(record);
146e8f69799SIlya Lesokhin }
147e8f69799SIlya Lesokhin 
148d80a1b9dSBoris Pismenny static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
149e8f69799SIlya Lesokhin {
150e8f69799SIlya Lesokhin 	struct tls_record_info *info, *temp;
151e8f69799SIlya Lesokhin 
152e8f69799SIlya Lesokhin 	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
153e8f69799SIlya Lesokhin 		list_del(&info->list);
154e8f69799SIlya Lesokhin 		destroy_record(info);
155e8f69799SIlya Lesokhin 	}
156e8f69799SIlya Lesokhin 
157e8f69799SIlya Lesokhin 	offload_ctx->retransmit_hint = NULL;
158e8f69799SIlya Lesokhin }
159e8f69799SIlya Lesokhin 
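/* clean_acked_data callback, run as TCP ACKs advance: frees all closed
 * records whose end_seq is now fully acknowledged.
 */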
160e8f69799SIlya Lesokhin static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
161e8f69799SIlya Lesokhin {
162e8f69799SIlya Lesokhin 	struct tls_context *tls_ctx = tls_get_ctx(sk);
163e8f69799SIlya Lesokhin 	struct tls_record_info *info, *temp;
164d80a1b9dSBoris Pismenny 	struct tls_offload_context_tx *ctx;
165e8f69799SIlya Lesokhin 	u64 deleted_records = 0;
166e8f69799SIlya Lesokhin 	unsigned long flags;
167e8f69799SIlya Lesokhin 
168e8f69799SIlya Lesokhin 	if (!tls_ctx)
169e8f69799SIlya Lesokhin 		return;
170e8f69799SIlya Lesokhin 
171d80a1b9dSBoris Pismenny 	ctx = tls_offload_ctx_tx(tls_ctx);
172e8f69799SIlya Lesokhin 
173e8f69799SIlya Lesokhin 	spin_lock_irqsave(&ctx->lock, flags);
174e8f69799SIlya Lesokhin 	info = ctx->retransmit_hint;
1756e3d02b6SJakub Kicinski 	if (info && !before(acked_seq, info->end_seq))
176e8f69799SIlya Lesokhin 		ctx->retransmit_hint = NULL;
177e8f69799SIlya Lesokhin 
178e8f69799SIlya Lesokhin 	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
179e8f69799SIlya Lesokhin 		if (before(acked_seq, info->end_seq))
180e8f69799SIlya Lesokhin 			break;
181e8f69799SIlya Lesokhin 		list_del(&info->list);
182e8f69799SIlya Lesokhin 
183e8f69799SIlya Lesokhin 		destroy_record(info);
184e8f69799SIlya Lesokhin 		deleted_records++;
185e8f69799SIlya Lesokhin 	}
186e8f69799SIlya Lesokhin 
187e8f69799SIlya Lesokhin 	ctx->unacked_record_sn += deleted_records;
188e8f69799SIlya Lesokhin 	spin_unlock_irqrestore(&ctx->lock, flags);
189e8f69799SIlya Lesokhin }
190e8f69799SIlya Lesokhin 
191e8f69799SIlya Lesokhin /* At this point, there should be no references on this
192e8f69799SIlya Lesokhin  * socket and no in-flight SKBs associated with this
193e8f69799SIlya Lesokhin  * socket, so it is safe to free all the resources.
194e8f69799SIlya Lesokhin  */
1958d5a49e9SJakub Kicinski void tls_device_sk_destruct(struct sock *sk)
196e8f69799SIlya Lesokhin {
197e8f69799SIlya Lesokhin 	struct tls_context *tls_ctx = tls_get_ctx(sk);
198d80a1b9dSBoris Pismenny 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
199e8f69799SIlya Lesokhin 
2004799ac81SBoris Pismenny 	tls_ctx->sk_destruct(sk);
2014799ac81SBoris Pismenny 
2024799ac81SBoris Pismenny 	if (tls_ctx->tx_conf == TLS_HW) {
203e8f69799SIlya Lesokhin 		if (ctx->open_record)
204e8f69799SIlya Lesokhin 			destroy_record(ctx->open_record);
205e8f69799SIlya Lesokhin 		delete_all_records(ctx);
206e8f69799SIlya Lesokhin 		crypto_free_aead(ctx->aead_send);
207e8f69799SIlya Lesokhin 		clean_acked_data_disable(inet_csk(sk));
2084799ac81SBoris Pismenny 	}
209e8f69799SIlya Lesokhin 
210e8f69799SIlya Lesokhin 	tls_device_queue_ctx_destruction(tls_ctx);
211e8f69799SIlya Lesokhin }
2128d5a49e9SJakub Kicinski EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
213e8f69799SIlya Lesokhin 
21435b71a34SJakub Kicinski void tls_device_free_resources_tx(struct sock *sk)
21535b71a34SJakub Kicinski {
21635b71a34SJakub Kicinski 	struct tls_context *tls_ctx = tls_get_ctx(sk);
21735b71a34SJakub Kicinski 
21835b71a34SJakub Kicinski 	tls_free_partial_record(sk, tls_ctx);
21935b71a34SJakub Kicinski }
22035b71a34SJakub Kicinski 
2218538d29cSJakub Kicinski void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
2228538d29cSJakub Kicinski {
2238538d29cSJakub Kicinski 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2248538d29cSJakub Kicinski 
2258538d29cSJakub Kicinski 	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
2268538d29cSJakub Kicinski 	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
2278538d29cSJakub Kicinski }
2288538d29cSJakub Kicinski EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
2298538d29cSJakub Kicinski 
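/* TX resync flow: a driver that lost sync calls
 * tls_offload_tx_resync_request() above; TLS_TX_SYNC_SCHED then makes the
 * next tls_push_record() invoke tls_device_resync_tx(), which fences TCP
 * retransmit collapsing and hands the current record sequence number back
 * to the driver at the given TCP sequence.
 */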
23050180074SJakub Kicinski static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
23150180074SJakub Kicinski 				 u32 seq)
23250180074SJakub Kicinski {
23350180074SJakub Kicinski 	struct net_device *netdev;
234b5d9a834SDirk van der Merwe 	int err = 0;
23550180074SJakub Kicinski 	u8 *rcd_sn;
23650180074SJakub Kicinski 
237*1be68a87SJakub Kicinski 	tcp_write_collapse_fence(sk);
23850180074SJakub Kicinski 	rcd_sn = tls_ctx->tx.rec_seq;
23950180074SJakub Kicinski 
2408538d29cSJakub Kicinski 	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
24150180074SJakub Kicinski 	down_read(&device_offload_lock);
24294ce3b64SMaxim Mikityanskiy 	netdev = rcu_dereference_protected(tls_ctx->netdev,
24394ce3b64SMaxim Mikityanskiy 					   lockdep_is_held(&device_offload_lock));
24450180074SJakub Kicinski 	if (netdev)
245b5d9a834SDirk van der Merwe 		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
246b5d9a834SDirk van der Merwe 							 rcd_sn,
24750180074SJakub Kicinski 							 TLS_OFFLOAD_CTX_DIR_TX);
24850180074SJakub Kicinski 	up_read(&device_offload_lock);
249b5d9a834SDirk van der Merwe 	if (err)
250b5d9a834SDirk van der Merwe 		return;
25150180074SJakub Kicinski 
25250180074SJakub Kicinski 	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
25350180074SJakub Kicinski }
25450180074SJakub Kicinski 
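/* Append @size bytes at @pfrag->offset to the open record: grow the last
 * frag when the new chunk is contiguous in the same page, otherwise start
 * a new frag and take an extra page reference.
 */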
255e8f69799SIlya Lesokhin static void tls_append_frag(struct tls_record_info *record,
256e8f69799SIlya Lesokhin 			    struct page_frag *pfrag,
257e8f69799SIlya Lesokhin 			    int size)
258e8f69799SIlya Lesokhin {
259e8f69799SIlya Lesokhin 	skb_frag_t *frag;
260e8f69799SIlya Lesokhin 
261e8f69799SIlya Lesokhin 	frag = &record->frags[record->num_frags - 1];
262d8e18a51SMatthew Wilcox (Oracle) 	if (skb_frag_page(frag) == pfrag->page &&
263b54c9d5bSJonathan Lemon 	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
264d8e18a51SMatthew Wilcox (Oracle) 		skb_frag_size_add(frag, size);
265e8f69799SIlya Lesokhin 	} else {
266e8f69799SIlya Lesokhin 		++frag;
267b51f4113SYunsheng Lin 		skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
268b51f4113SYunsheng Lin 					size);
269e8f69799SIlya Lesokhin 		++record->num_frags;
270e8f69799SIlya Lesokhin 		get_page(pfrag->page);
271e8f69799SIlya Lesokhin 	}
272e8f69799SIlya Lesokhin 
273e8f69799SIlya Lesokhin 	pfrag->offset += size;
274e8f69799SIlya Lesokhin 	record->len += size;
275e8f69799SIlya Lesokhin }
276e8f69799SIlya Lesokhin 
277e8f69799SIlya Lesokhin static int tls_push_record(struct sock *sk,
278e8f69799SIlya Lesokhin 			   struct tls_context *ctx,
279d80a1b9dSBoris Pismenny 			   struct tls_offload_context_tx *offload_ctx,
280e8f69799SIlya Lesokhin 			   struct tls_record_info *record,
281e7b159a4SJakub Kicinski 			   int flags)
282e8f69799SIlya Lesokhin {
2834509de14SVakul Garg 	struct tls_prot_info *prot = &ctx->prot_info;
284e8f69799SIlya Lesokhin 	struct tcp_sock *tp = tcp_sk(sk);
285e8f69799SIlya Lesokhin 	skb_frag_t *frag;
286e8f69799SIlya Lesokhin 	int i;
287e8f69799SIlya Lesokhin 
288e8f69799SIlya Lesokhin 	record->end_seq = tp->write_seq + record->len;
289d4774ac0SJakub Kicinski 	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
290e8f69799SIlya Lesokhin 	offload_ctx->open_record = NULL;
29150180074SJakub Kicinski 
29250180074SJakub Kicinski 	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
29350180074SJakub Kicinski 		tls_device_resync_tx(sk, ctx, tp->write_seq);
29450180074SJakub Kicinski 
295fb0f886fSJakub Kicinski 	tls_advance_record_sn(sk, prot, &ctx->tx);
296e8f69799SIlya Lesokhin 
297e8f69799SIlya Lesokhin 	for (i = 0; i < record->num_frags; i++) {
298e8f69799SIlya Lesokhin 		frag = &record->frags[i];
299e8f69799SIlya Lesokhin 		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
300e8f69799SIlya Lesokhin 		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
301b54c9d5bSJonathan Lemon 			    skb_frag_size(frag), skb_frag_off(frag));
302d8e18a51SMatthew Wilcox (Oracle) 		sk_mem_charge(sk, skb_frag_size(frag));
303e8f69799SIlya Lesokhin 		get_page(skb_frag_page(frag));
304e8f69799SIlya Lesokhin 	}
305e8f69799SIlya Lesokhin 	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
306e8f69799SIlya Lesokhin 
307e8f69799SIlya Lesokhin 	/* all ready, send */
308e8f69799SIlya Lesokhin 	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
309e8f69799SIlya Lesokhin }
310e8f69799SIlya Lesokhin 
3116b47808fSJakub Kicinski static void tls_device_record_close(struct sock *sk,
312e7b159a4SJakub Kicinski 				    struct tls_context *ctx,
313e7b159a4SJakub Kicinski 				    struct tls_record_info *record,
314e7b159a4SJakub Kicinski 				    struct page_frag *pfrag,
315e7b159a4SJakub Kicinski 				    unsigned char record_type)
316e7b159a4SJakub Kicinski {
317e7b159a4SJakub Kicinski 	struct tls_prot_info *prot = &ctx->prot_info;
3186b47808fSJakub Kicinski 	struct page_frag dummy_tag_frag;
319e7b159a4SJakub Kicinski 
320e7b159a4SJakub Kicinski 	/* append tag
321e7b159a4SJakub Kicinski 	 * device will fill in the tag, we just need to append a placeholder
322e7b159a4SJakub Kicinski 	 * use socket memory to improve coalescing (re-using a single buffer
323e7b159a4SJakub Kicinski 	 * increases frag count)
3246b47808fSJakub Kicinski 	 * if we can't allocate memory now use the dummy page
325e7b159a4SJakub Kicinski 	 */
3266b47808fSJakub Kicinski 	if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
3276b47808fSJakub Kicinski 	    !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
3286b47808fSJakub Kicinski 		dummy_tag_frag.page = dummy_page;
3296b47808fSJakub Kicinski 		dummy_tag_frag.offset = 0;
3306b47808fSJakub Kicinski 		pfrag = &dummy_tag_frag;
331e7b159a4SJakub Kicinski 	}
3326b47808fSJakub Kicinski 	tls_append_frag(record, pfrag, prot->tag_size);
333e7b159a4SJakub Kicinski 
334e7b159a4SJakub Kicinski 	/* fill prepend */
335e7b159a4SJakub Kicinski 	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
336e7b159a4SJakub Kicinski 			 record->len - prot->overhead_size,
3376942a284SVadim Fedorenko 			 record_type);
338e7b159a4SJakub Kicinski }
339e7b159a4SJakub Kicinski 
340d80a1b9dSBoris Pismenny static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
341e8f69799SIlya Lesokhin 				 struct page_frag *pfrag,
342e8f69799SIlya Lesokhin 				 size_t prepend_size)
343e8f69799SIlya Lesokhin {
344e8f69799SIlya Lesokhin 	struct tls_record_info *record;
345e8f69799SIlya Lesokhin 	skb_frag_t *frag;
346e8f69799SIlya Lesokhin 
347e8f69799SIlya Lesokhin 	record = kmalloc(sizeof(*record), GFP_KERNEL);
348e8f69799SIlya Lesokhin 	if (!record)
349e8f69799SIlya Lesokhin 		return -ENOMEM;
350e8f69799SIlya Lesokhin 
351e8f69799SIlya Lesokhin 	frag = &record->frags[0];
352b51f4113SYunsheng Lin 	skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
353b51f4113SYunsheng Lin 				prepend_size);
354e8f69799SIlya Lesokhin 
355e8f69799SIlya Lesokhin 	get_page(pfrag->page);
356e8f69799SIlya Lesokhin 	pfrag->offset += prepend_size;
357e8f69799SIlya Lesokhin 
358e8f69799SIlya Lesokhin 	record->num_frags = 1;
359e8f69799SIlya Lesokhin 	record->len = prepend_size;
360e8f69799SIlya Lesokhin 	offload_ctx->open_record = record;
361e8f69799SIlya Lesokhin 	return 0;
362e8f69799SIlya Lesokhin }
363e8f69799SIlya Lesokhin 
364e8f69799SIlya Lesokhin static int tls_do_allocation(struct sock *sk,
365d80a1b9dSBoris Pismenny 			     struct tls_offload_context_tx *offload_ctx,
366e8f69799SIlya Lesokhin 			     struct page_frag *pfrag,
367e8f69799SIlya Lesokhin 			     size_t prepend_size)
368e8f69799SIlya Lesokhin {
369e8f69799SIlya Lesokhin 	int ret;
370e8f69799SIlya Lesokhin 
371e8f69799SIlya Lesokhin 	if (!offload_ctx->open_record) {
372e8f69799SIlya Lesokhin 		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
373e8f69799SIlya Lesokhin 						   sk->sk_allocation))) {
374d5bee737SJakub Sitnicki 			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
375e8f69799SIlya Lesokhin 			sk_stream_moderate_sndbuf(sk);
376e8f69799SIlya Lesokhin 			return -ENOMEM;
377e8f69799SIlya Lesokhin 		}
378e8f69799SIlya Lesokhin 
379e8f69799SIlya Lesokhin 		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
380e8f69799SIlya Lesokhin 		if (ret)
381e8f69799SIlya Lesokhin 			return ret;
382e8f69799SIlya Lesokhin 
383e8f69799SIlya Lesokhin 		if (pfrag->size > pfrag->offset)
384e8f69799SIlya Lesokhin 			return 0;
385e8f69799SIlya Lesokhin 	}
386e8f69799SIlya Lesokhin 
387e8f69799SIlya Lesokhin 	if (!sk_page_frag_refill(sk, pfrag))
388e8f69799SIlya Lesokhin 		return -ENOMEM;
389e8f69799SIlya Lesokhin 
390e8f69799SIlya Lesokhin 	return 0;
391e8f69799SIlya Lesokhin }
392e8f69799SIlya Lesokhin 
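/* Copy the unaligned head with regular (cached) stores up to the next
 * cache line boundary, stream the aligned bulk with non-temporal copies
 * (the payload is consumed by the device, not the CPU), then copy the
 * remaining tail. E.g. with SMP_CACHE_BYTES == 64 and addr % 64 == 40,
 * pre_copy == 24: ~(addr - 1) & 63 is the distance to the next boundary.
 */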
393e681cc60SJakub Kicinski static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
394e681cc60SJakub Kicinski {
395e681cc60SJakub Kicinski 	size_t pre_copy, nocache;
396e681cc60SJakub Kicinski 
397e681cc60SJakub Kicinski 	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
398e681cc60SJakub Kicinski 	if (pre_copy) {
399e681cc60SJakub Kicinski 		pre_copy = min(pre_copy, bytes);
400e681cc60SJakub Kicinski 		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
401e681cc60SJakub Kicinski 			return -EFAULT;
402e681cc60SJakub Kicinski 		bytes -= pre_copy;
403e681cc60SJakub Kicinski 		addr += pre_copy;
404e681cc60SJakub Kicinski 	}
405e681cc60SJakub Kicinski 
406e681cc60SJakub Kicinski 	nocache = round_down(bytes, SMP_CACHE_BYTES);
407e681cc60SJakub Kicinski 	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
408e681cc60SJakub Kicinski 		return -EFAULT;
409e681cc60SJakub Kicinski 	bytes -= nocache;
410e681cc60SJakub Kicinski 	addr += nocache;
411e681cc60SJakub Kicinski 
412e681cc60SJakub Kicinski 	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
413e681cc60SJakub Kicinski 		return -EFAULT;
414e681cc60SJakub Kicinski 
415e681cc60SJakub Kicinski 	return 0;
416e681cc60SJakub Kicinski }
417e681cc60SJakub Kicinski 
418e8f69799SIlya Lesokhin static int tls_push_data(struct sock *sk,
4193dc8976cSDavid Howells 			 struct iov_iter *iter,
420e8f69799SIlya Lesokhin 			 size_t size, int flags,
4213dc8976cSDavid Howells 			 unsigned char record_type)
422e8f69799SIlya Lesokhin {
423e8f69799SIlya Lesokhin 	struct tls_context *tls_ctx = tls_get_ctx(sk);
4244509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
425d80a1b9dSBoris Pismenny 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
4263afef8c7SJiapeng Chong 	struct tls_record_info *record;
42741477662SJakub Kicinski 	int tls_push_record_flags;
428e8f69799SIlya Lesokhin 	struct page_frag *pfrag;
429e8f69799SIlya Lesokhin 	size_t orig_size = size;
430e8f69799SIlya Lesokhin 	u32 max_open_record_len;
431ea1dd3e9SRohit Maheshwari 	bool more = false;
432e8f69799SIlya Lesokhin 	bool done = false;
433ea1dd3e9SRohit Maheshwari 	int copy, rc = 0;
434e8f69799SIlya Lesokhin 	long timeo;
435e8f69799SIlya Lesokhin 
436e8f69799SIlya Lesokhin 	if (flags &
437c004b0e0SHannes Reinecke 	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
438c004b0e0SHannes Reinecke 	      MSG_SPLICE_PAGES | MSG_EOR))
4394a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
440e8f69799SIlya Lesokhin 
441c004b0e0SHannes Reinecke 	if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR))
442c004b0e0SHannes Reinecke 		return -EINVAL;
443c004b0e0SHannes Reinecke 
44493277b25SJakub Kicinski 	if (unlikely(sk->sk_err))
445e8f69799SIlya Lesokhin 		return -sk->sk_err;
446e8f69799SIlya Lesokhin 
44741477662SJakub Kicinski 	flags |= MSG_SENDPAGE_DECRYPTED;
448f8dd95b2SDavid Howells 	tls_push_record_flags = flags | MSG_MORE;
44941477662SJakub Kicinski 
450e8f69799SIlya Lesokhin 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
45194850257SBoris Pismenny 	if (tls_is_partially_sent_record(tls_ctx)) {
45294850257SBoris Pismenny 		rc = tls_push_partial_record(sk, tls_ctx, flags);
453e8f69799SIlya Lesokhin 		if (rc < 0)
454e8f69799SIlya Lesokhin 			return rc;
45594850257SBoris Pismenny 	}
456e8f69799SIlya Lesokhin 
457e8f69799SIlya Lesokhin 	pfrag = sk_page_frag(sk);
458e8f69799SIlya Lesokhin 
459e8f69799SIlya Lesokhin 	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
460e8f69799SIlya Lesokhin 	 * we need to leave room for an authentication tag.
461e8f69799SIlya Lesokhin 	 */
462e8f69799SIlya Lesokhin 	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
4634509de14SVakul Garg 			      prot->prepend_size;
464e8f69799SIlya Lesokhin 	do {
46534ef1ed1SJakub Kicinski 		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
46634ef1ed1SJakub Kicinski 		if (unlikely(rc)) {
467e8f69799SIlya Lesokhin 			rc = sk_stream_wait_memory(sk, &timeo);
468e8f69799SIlya Lesokhin 			if (!rc)
469e8f69799SIlya Lesokhin 				continue;
470e8f69799SIlya Lesokhin 
471e8f69799SIlya Lesokhin 			record = ctx->open_record;
472e8f69799SIlya Lesokhin 			if (!record)
473e8f69799SIlya Lesokhin 				break;
474e8f69799SIlya Lesokhin handle_error:
475e8f69799SIlya Lesokhin 			if (record_type != TLS_RECORD_TYPE_DATA) {
476e8f69799SIlya Lesokhin 				/* avoid sending partial
477e8f69799SIlya Lesokhin 				 * record with type !=
478e8f69799SIlya Lesokhin 				 * application_data
479e8f69799SIlya Lesokhin 				 */
480e8f69799SIlya Lesokhin 				size = orig_size;
481e8f69799SIlya Lesokhin 				destroy_record(record);
482e8f69799SIlya Lesokhin 				ctx->open_record = NULL;
4834509de14SVakul Garg 			} else if (record->len > prot->prepend_size) {
484e8f69799SIlya Lesokhin 				goto last_record;
485e8f69799SIlya Lesokhin 			}
486e8f69799SIlya Lesokhin 
487e8f69799SIlya Lesokhin 			break;
488e8f69799SIlya Lesokhin 		}
489e8f69799SIlya Lesokhin 
490e8f69799SIlya Lesokhin 		record = ctx->open_record;
491e8f69799SIlya Lesokhin 
492c1318b39SBoris Pismenny 		copy = min_t(size_t, size, max_open_record_len - record->len);
4933dc8976cSDavid Howells 		if (copy && (flags & MSG_SPLICE_PAGES)) {
49424763c9cSDavid Howells 			struct page_frag zc_pfrag;
49524763c9cSDavid Howells 			struct page **pages = &zc_pfrag.page;
49624763c9cSDavid Howells 			size_t off;
49724763c9cSDavid Howells 
4983dc8976cSDavid Howells 			rc = iov_iter_extract_pages(iter, &pages,
4993dc8976cSDavid Howells 						    copy, 1, 0, &off);
50024763c9cSDavid Howells 			if (rc <= 0) {
50124763c9cSDavid Howells 				if (rc == 0)
50224763c9cSDavid Howells 					rc = -EIO;
50324763c9cSDavid Howells 				goto handle_error;
50424763c9cSDavid Howells 			}
50524763c9cSDavid Howells 			copy = rc;
50624763c9cSDavid Howells 
50724763c9cSDavid Howells 			if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
5083dc8976cSDavid Howells 				iov_iter_revert(iter, copy);
50924763c9cSDavid Howells 				rc = -EIO;
51024763c9cSDavid Howells 				goto handle_error;
51124763c9cSDavid Howells 			}
51224763c9cSDavid Howells 
51324763c9cSDavid Howells 			zc_pfrag.offset = off;
51424763c9cSDavid Howells 			zc_pfrag.size = copy;
51524763c9cSDavid Howells 			tls_append_frag(record, &zc_pfrag, copy);
516c1318b39SBoris Pismenny 		} else if (copy) {
517c1318b39SBoris Pismenny 			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
518c1318b39SBoris Pismenny 
519e681cc60SJakub Kicinski 			rc = tls_device_copy_data(page_address(pfrag->page) +
520c1318b39SBoris Pismenny 						  pfrag->offset, copy,
5213dc8976cSDavid Howells 						  iter);
522e681cc60SJakub Kicinski 			if (rc)
523e8f69799SIlya Lesokhin 				goto handle_error;
524e8f69799SIlya Lesokhin 			tls_append_frag(record, pfrag, copy);
525a0df7194SMaxim Mikityanskiy 		}
526e8f69799SIlya Lesokhin 
527e8f69799SIlya Lesokhin 		size -= copy;
528e8f69799SIlya Lesokhin 		if (!size) {
529e8f69799SIlya Lesokhin last_record:
530e8f69799SIlya Lesokhin 			tls_push_record_flags = flags;
531f8dd95b2SDavid Howells 			if (flags & MSG_MORE) {
532ea1dd3e9SRohit Maheshwari 				more = true;
533e8f69799SIlya Lesokhin 				break;
534e8f69799SIlya Lesokhin 			}
535e8f69799SIlya Lesokhin 
536e8f69799SIlya Lesokhin 			done = true;
537e8f69799SIlya Lesokhin 		}
538e8f69799SIlya Lesokhin 
539e8f69799SIlya Lesokhin 		if (done || record->len >= max_open_record_len ||
540e8f69799SIlya Lesokhin 		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
5416b47808fSJakub Kicinski 			tls_device_record_close(sk, tls_ctx, record,
542e7b159a4SJakub Kicinski 						pfrag, record_type);
543e7b159a4SJakub Kicinski 
544e8f69799SIlya Lesokhin 			rc = tls_push_record(sk,
545e8f69799SIlya Lesokhin 					     tls_ctx,
546e8f69799SIlya Lesokhin 					     ctx,
547e8f69799SIlya Lesokhin 					     record,
548e7b159a4SJakub Kicinski 					     tls_push_record_flags);
549e8f69799SIlya Lesokhin 			if (rc < 0)
550e8f69799SIlya Lesokhin 				break;
551e8f69799SIlya Lesokhin 		}
552e8f69799SIlya Lesokhin 	} while (!done);
553e8f69799SIlya Lesokhin 
554ea1dd3e9SRohit Maheshwari 	tls_ctx->pending_open_record_frags = more;
555ea1dd3e9SRohit Maheshwari 
556e8f69799SIlya Lesokhin 	if (orig_size - size > 0)
557e8f69799SIlya Lesokhin 		rc = orig_size - size;
558e8f69799SIlya Lesokhin 
559e8f69799SIlya Lesokhin 	return rc;
560e8f69799SIlya Lesokhin }
561e8f69799SIlya Lesokhin 
562e8f69799SIlya Lesokhin int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
563e8f69799SIlya Lesokhin {
564e8f69799SIlya Lesokhin 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
56579ffe608SJakub Kicinski 	struct tls_context *tls_ctx = tls_get_ctx(sk);
566e8f69799SIlya Lesokhin 	int rc;
567e8f69799SIlya Lesokhin 
56824763c9cSDavid Howells 	if (!tls_ctx->zerocopy_sendfile)
56924763c9cSDavid Howells 		msg->msg_flags &= ~MSG_SPLICE_PAGES;
57024763c9cSDavid Howells 
57179ffe608SJakub Kicinski 	mutex_lock(&tls_ctx->tx_lock);
572e8f69799SIlya Lesokhin 	lock_sock(sk);
573e8f69799SIlya Lesokhin 
574e8f69799SIlya Lesokhin 	if (unlikely(msg->msg_controllen)) {
57558790314SJakub Kicinski 		rc = tls_process_cmsg(sk, msg, &record_type);
576e8f69799SIlya Lesokhin 		if (rc)
577e8f69799SIlya Lesokhin 			goto out;
578e8f69799SIlya Lesokhin 	}
579e8f69799SIlya Lesokhin 
5803dc8976cSDavid Howells 	rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
5813dc8976cSDavid Howells 			   record_type);
582e8f69799SIlya Lesokhin 
583e8f69799SIlya Lesokhin out:
584e8f69799SIlya Lesokhin 	release_sock(sk);
58579ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
586e8f69799SIlya Lesokhin 	return rc;
587e8f69799SIlya Lesokhin }
588e8f69799SIlya Lesokhin 
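/* Userspace usage sketch (hypothetical fd; assumes kTLS TX was enabled via
 * setsockopt(fd, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info))):
 * plain send() data arrives here as TLS_RECORD_TYPE_DATA, while a control
 * record such as an alert is selected with a TLS_SET_RECORD_TYPE cmsg:
 *
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = 21;	(TLS record type: alert)
 *	(then attach msg_iov with the payload and call sendmsg(fd, &msg, 0))
 */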
589d4c1e80bSDavid Howells void tls_device_splice_eof(struct socket *sock)
590d4c1e80bSDavid Howells {
591d4c1e80bSDavid Howells 	struct sock *sk = sock->sk;
592d4c1e80bSDavid Howells 	struct tls_context *tls_ctx = tls_get_ctx(sk);
5933dc8976cSDavid Howells 	struct iov_iter iter = {};
594d4c1e80bSDavid Howells 
595d4c1e80bSDavid Howells 	if (!tls_is_partially_sent_record(tls_ctx))
596d4c1e80bSDavid Howells 		return;
597d4c1e80bSDavid Howells 
598d4c1e80bSDavid Howells 	mutex_lock(&tls_ctx->tx_lock);
599d4c1e80bSDavid Howells 	lock_sock(sk);
600d4c1e80bSDavid Howells 
601d4c1e80bSDavid Howells 	if (tls_is_partially_sent_record(tls_ctx)) {
6023dc8976cSDavid Howells 		iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
6033dc8976cSDavid Howells 		tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
604d4c1e80bSDavid Howells 	}
605d4c1e80bSDavid Howells 
606d4c1e80bSDavid Howells 	release_sock(sk);
607d4c1e80bSDavid Howells 	mutex_unlock(&tls_ctx->tx_lock);
608d4c1e80bSDavid Howells }
609d4c1e80bSDavid Howells 
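/* Look up the record that contains TCP sequence @seq (drivers call this on
 * the retransmit path) and report its record sequence number through
 * @p_record_sn. Runs under RCU and caches a retransmit hint to speed up
 * in-order retransmissions.
 */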
610d80a1b9dSBoris Pismenny struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
611e8f69799SIlya Lesokhin 				       u32 seq, u64 *p_record_sn)
612e8f69799SIlya Lesokhin {
613e8f69799SIlya Lesokhin 	u64 record_sn = context->hint_record_sn;
61406f5201cSRohit Maheshwari 	struct tls_record_info *info, *last;
615e8f69799SIlya Lesokhin 
616e8f69799SIlya Lesokhin 	info = context->retransmit_hint;
617e8f69799SIlya Lesokhin 	if (!info ||
618e8f69799SIlya Lesokhin 	    before(seq, info->end_seq - info->len)) {
619e8f69799SIlya Lesokhin 		/* if retransmit_hint is irrelevant start
62072a0f6d0SWang Hai 		 * from the beginning of the list
621e8f69799SIlya Lesokhin 		 */
622d4774ac0SJakub Kicinski 		info = list_first_entry_or_null(&context->records_list,
623e8f69799SIlya Lesokhin 						struct tls_record_info, list);
624d4774ac0SJakub Kicinski 		if (!info)
625d4774ac0SJakub Kicinski 			return NULL;
62606f5201cSRohit Maheshwari 		/* send the start_marker record if seq number is before the
62706f5201cSRohit Maheshwari 		 * tls offload start marker sequence number. This record is
62806f5201cSRohit Maheshwari 		 * required to handle TCP packets which are before TLS offload
62906f5201cSRohit Maheshwari 		 * started.
63006f5201cSRohit Maheshwari 		 *  And if it's not start marker, look if this seq number
63106f5201cSRohit Maheshwari 	 * If it's not the start marker, check whether this seq
63206f5201cSRohit Maheshwari 	 * number belongs to the list.
63306f5201cSRohit Maheshwari 		if (likely(!tls_record_is_start_marker(info))) {
63406f5201cSRohit Maheshwari 			/* we have the first record, get the last record to see
63506f5201cSRohit Maheshwari 			 * if this seq number belongs to the list.
63606f5201cSRohit Maheshwari 			 */
63706f5201cSRohit Maheshwari 			last = list_last_entry(&context->records_list,
63806f5201cSRohit Maheshwari 					       struct tls_record_info, list);
63906f5201cSRohit Maheshwari 
64006f5201cSRohit Maheshwari 			if (!between(seq, tls_record_start_seq(info),
64106f5201cSRohit Maheshwari 				     last->end_seq))
64206f5201cSRohit Maheshwari 				return NULL;
64306f5201cSRohit Maheshwari 		}
644e8f69799SIlya Lesokhin 		record_sn = context->unacked_record_sn;
645e8f69799SIlya Lesokhin 	}
646e8f69799SIlya Lesokhin 
647d4774ac0SJakub Kicinski 	/* We just need the _rcu for the READ_ONCE() */
648d4774ac0SJakub Kicinski 	rcu_read_lock();
649d4774ac0SJakub Kicinski 	list_for_each_entry_from_rcu(info, &context->records_list, list) {
650e8f69799SIlya Lesokhin 		if (before(seq, info->end_seq)) {
651e8f69799SIlya Lesokhin 			if (!context->retransmit_hint ||
652e8f69799SIlya Lesokhin 			    after(info->end_seq,
653e8f69799SIlya Lesokhin 				  context->retransmit_hint->end_seq)) {
654e8f69799SIlya Lesokhin 				context->hint_record_sn = record_sn;
655e8f69799SIlya Lesokhin 				context->retransmit_hint = info;
656e8f69799SIlya Lesokhin 			}
657e8f69799SIlya Lesokhin 			*p_record_sn = record_sn;
658d4774ac0SJakub Kicinski 			goto exit_rcu_unlock;
659e8f69799SIlya Lesokhin 		}
660e8f69799SIlya Lesokhin 		record_sn++;
661e8f69799SIlya Lesokhin 	}
662d4774ac0SJakub Kicinski 	info = NULL;
663e8f69799SIlya Lesokhin 
664d4774ac0SJakub Kicinski exit_rcu_unlock:
665d4774ac0SJakub Kicinski 	rcu_read_unlock();
666d4774ac0SJakub Kicinski 	return info;
667e8f69799SIlya Lesokhin }
668e8f69799SIlya Lesokhin EXPORT_SYMBOL(tls_get_record);
669e8f69799SIlya Lesokhin 
670e8f69799SIlya Lesokhin static int tls_device_push_pending_record(struct sock *sk, int flags)
671e8f69799SIlya Lesokhin {
6723dc8976cSDavid Howells 	struct iov_iter iter;
673e8f69799SIlya Lesokhin 
6743dc8976cSDavid Howells 	iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
6753dc8976cSDavid Howells 	return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
676e8f69799SIlya Lesokhin }
677e8f69799SIlya Lesokhin 
6787463d3a2SBoris Pismenny void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
6797463d3a2SBoris Pismenny {
68002b1fa07SJakub Kicinski 	if (tls_is_partially_sent_record(ctx)) {
6817463d3a2SBoris Pismenny 		gfp_t sk_allocation = sk->sk_allocation;
6827463d3a2SBoris Pismenny 
68302b1fa07SJakub Kicinski 		WARN_ON_ONCE(sk->sk_write_pending);
68402b1fa07SJakub Kicinski 
6857463d3a2SBoris Pismenny 		sk->sk_allocation = GFP_ATOMIC;
68641477662SJakub Kicinski 		tls_push_partial_record(sk, ctx,
68741477662SJakub Kicinski 					MSG_DONTWAIT | MSG_NOSIGNAL |
68841477662SJakub Kicinski 					MSG_SENDPAGE_DECRYPTED);
6897463d3a2SBoris Pismenny 		sk->sk_allocation = sk_allocation;
6907463d3a2SBoris Pismenny 	}
6917463d3a2SBoris Pismenny }
6927463d3a2SBoris Pismenny 
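/* RX resync: hand the record sequence number expected at TCP seq @seq to
 * the device so it can resume decryption mid-stream after losing sync.
 */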
693e52972c1SJakub Kicinski static void tls_device_resync_rx(struct tls_context *tls_ctx,
69489fec474SJakub Kicinski 				 struct sock *sk, u32 seq, u8 *rcd_sn)
695e52972c1SJakub Kicinski {
6968538d29cSJakub Kicinski 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
697e52972c1SJakub Kicinski 	struct net_device *netdev;
698e52972c1SJakub Kicinski 
6998538d29cSJakub Kicinski 	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
70005fc8b6cSMaxim Mikityanskiy 	rcu_read_lock();
70194ce3b64SMaxim Mikityanskiy 	netdev = rcu_dereference(tls_ctx->netdev);
702e52972c1SJakub Kicinski 	if (netdev)
703eeb2efafSJakub Kicinski 		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
704eeb2efafSJakub Kicinski 						   TLS_OFFLOAD_CTX_DIR_RX);
70505fc8b6cSMaxim Mikityanskiy 	rcu_read_unlock();
706a4d26fdbSJakub Kicinski 	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
707e52972c1SJakub Kicinski }
708e52972c1SJakub Kicinski 
709ed9b7646SBoris Pismenny static bool
710ed9b7646SBoris Pismenny tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
711138559b9STariq Toukan 			   s64 resync_req, u32 *seq, u16 *rcd_delta)
712ed9b7646SBoris Pismenny {
713ed9b7646SBoris Pismenny 	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
714ed9b7646SBoris Pismenny 	u32 req_seq = resync_req >> 32;
715ed9b7646SBoris Pismenny 	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
716138559b9STariq Toukan 	u16 i;
717138559b9STariq Toukan 
718138559b9STariq Toukan 	*rcd_delta = 0;
719ed9b7646SBoris Pismenny 
720ed9b7646SBoris Pismenny 	if (is_async) {
721138559b9STariq Toukan 		/* shouldn't get to wraparound:
722138559b9STariq Toukan 		 * too long in async stage, something bad happened
723138559b9STariq Toukan 		 */
724138559b9STariq Toukan 		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
725138559b9STariq Toukan 			return false;
726138559b9STariq Toukan 
727ed9b7646SBoris Pismenny 		/* asynchronous stage: log all headers seq such that
728ed9b7646SBoris Pismenny 		 * req_seq <= seq <= end_seq, and wait for real resync request
729ed9b7646SBoris Pismenny 		 */
730138559b9STariq Toukan 		if (before(*seq, req_seq))
731138559b9STariq Toukan 			return false;
732138559b9STariq Toukan 		if (!after(*seq, req_end) &&
733ed9b7646SBoris Pismenny 		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
734ed9b7646SBoris Pismenny 			resync_async->log[resync_async->loglen++] = *seq;
735ed9b7646SBoris Pismenny 
736138559b9STariq Toukan 		resync_async->rcd_delta++;
737138559b9STariq Toukan 
738ed9b7646SBoris Pismenny 		return false;
739ed9b7646SBoris Pismenny 	}
740ed9b7646SBoris Pismenny 
741ed9b7646SBoris Pismenny 	/* synchronous stage: check against the logged entries and
742ed9b7646SBoris Pismenny 	 * proceed to check the next entries if no match was found
743ed9b7646SBoris Pismenny 	 */
744138559b9STariq Toukan 	for (i = 0; i < resync_async->loglen; i++)
745138559b9STariq Toukan 		if (req_seq == resync_async->log[i] &&
746138559b9STariq Toukan 		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
747138559b9STariq Toukan 			*rcd_delta = resync_async->rcd_delta - i;
748ed9b7646SBoris Pismenny 			*seq = req_seq;
749138559b9STariq Toukan 			resync_async->loglen = 0;
750138559b9STariq Toukan 			resync_async->rcd_delta = 0;
751ed9b7646SBoris Pismenny 			return true;
752ed9b7646SBoris Pismenny 		}
753138559b9STariq Toukan 
754138559b9STariq Toukan 	resync_async->loglen = 0;
755138559b9STariq Toukan 	resync_async->rcd_delta = 0;
756ed9b7646SBoris Pismenny 
757ed9b7646SBoris Pismenny 	if (req_seq == *seq &&
758ed9b7646SBoris Pismenny 	    atomic64_try_cmpxchg(&resync_async->req,
759ed9b7646SBoris Pismenny 				 &resync_req, 0))
760ed9b7646SBoris Pismenny 		return true;
761ed9b7646SBoris Pismenny 
762ed9b7646SBoris Pismenny 	return false;
763ed9b7646SBoris Pismenny }
764ed9b7646SBoris Pismenny 
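/* Called from the receive path for each new record header while rx_conf is
 * TLS_HW; dispatches on the configured resync type to decide when a resync
 * request should be sent down to the device.
 */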
765f953d33bSJakub Kicinski void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
7664799ac81SBoris Pismenny {
7674799ac81SBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
7684799ac81SBoris Pismenny 	struct tls_offload_context_rx *rx_ctx;
769f953d33bSJakub Kicinski 	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
770acb5a07aSBoris Pismenny 	u32 sock_data, is_req_pending;
771f953d33bSJakub Kicinski 	struct tls_prot_info *prot;
7724799ac81SBoris Pismenny 	s64 resync_req;
773138559b9STariq Toukan 	u16 rcd_delta;
7744799ac81SBoris Pismenny 	u32 req_seq;
7754799ac81SBoris Pismenny 
7764799ac81SBoris Pismenny 	if (tls_ctx->rx_conf != TLS_HW)
7774799ac81SBoris Pismenny 		return;
778c55dcdd4SMaxim Mikityanskiy 	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
779c55dcdd4SMaxim Mikityanskiy 		return;
7804799ac81SBoris Pismenny 
781f953d33bSJakub Kicinski 	prot = &tls_ctx->prot_info;
7824799ac81SBoris Pismenny 	rx_ctx = tls_offload_ctx_rx(tls_ctx);
783f953d33bSJakub Kicinski 	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
784f953d33bSJakub Kicinski 
785f953d33bSJakub Kicinski 	switch (rx_ctx->resync_type) {
786f953d33bSJakub Kicinski 	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
7874799ac81SBoris Pismenny 		resync_req = atomic64_read(&rx_ctx->resync_req);
78849673739SJakub Kicinski 		req_seq = resync_req >> 32;
78949673739SJakub Kicinski 		seq += TLS_HEADER_SIZE - 1;
790acb5a07aSBoris Pismenny 		is_req_pending = resync_req;
7914799ac81SBoris Pismenny 
792acb5a07aSBoris Pismenny 		if (likely(!is_req_pending) || req_seq != seq ||
793f953d33bSJakub Kicinski 		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
794f953d33bSJakub Kicinski 			return;
795f953d33bSJakub Kicinski 		break;
796f953d33bSJakub Kicinski 	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
797f953d33bSJakub Kicinski 		if (likely(!rx_ctx->resync_nh_do_now))
798f953d33bSJakub Kicinski 			return;
799f953d33bSJakub Kicinski 
800f953d33bSJakub Kicinski 		/* head of next rec is already in, note that the sock_inq will
801f953d33bSJakub Kicinski 		 * include the currently parsed message when called from parser
802f953d33bSJakub Kicinski 		 */
8038538d29cSJakub Kicinski 		sock_data = tcp_inq(sk);
8048538d29cSJakub Kicinski 		if (sock_data > rcd_len) {
8058538d29cSJakub Kicinski 			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
8068538d29cSJakub Kicinski 							    rcd_len);
807f953d33bSJakub Kicinski 			return;
8088538d29cSJakub Kicinski 		}
809f953d33bSJakub Kicinski 
810f953d33bSJakub Kicinski 		rx_ctx->resync_nh_do_now = 0;
811f953d33bSJakub Kicinski 		seq += rcd_len;
812f953d33bSJakub Kicinski 		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
813f953d33bSJakub Kicinski 		break;
814ed9b7646SBoris Pismenny 	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
815ed9b7646SBoris Pismenny 		resync_req = atomic64_read(&rx_ctx->resync_async->req);
816ed9b7646SBoris Pismenny 		is_req_pending = resync_req;
817ed9b7646SBoris Pismenny 		if (likely(!is_req_pending))
818ed9b7646SBoris Pismenny 			return;
819ed9b7646SBoris Pismenny 
820ed9b7646SBoris Pismenny 		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
821138559b9STariq Toukan 						resync_req, &seq, &rcd_delta))
822ed9b7646SBoris Pismenny 			return;
823138559b9STariq Toukan 		tls_bigint_subtract(rcd_sn, rcd_delta);
824ed9b7646SBoris Pismenny 		break;
825f953d33bSJakub Kicinski 	}
826f953d33bSJakub Kicinski 
827f953d33bSJakub Kicinski 	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
828f953d33bSJakub Kicinski }
829f953d33bSJakub Kicinski 
830f953d33bSJakub Kicinski static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
831f953d33bSJakub Kicinski 					   struct tls_offload_context_rx *ctx,
832f953d33bSJakub Kicinski 					   struct sock *sk, struct sk_buff *skb)
833f953d33bSJakub Kicinski {
834f953d33bSJakub Kicinski 	struct strp_msg *rxm;
835f953d33bSJakub Kicinski 
836f953d33bSJakub Kicinski 	/* device will request resyncs by itself based on stream scan */
837f953d33bSJakub Kicinski 	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
838f953d33bSJakub Kicinski 		return;
839f953d33bSJakub Kicinski 	/* already scheduled */
840f953d33bSJakub Kicinski 	if (ctx->resync_nh_do_now)
841f953d33bSJakub Kicinski 		return;
842f953d33bSJakub Kicinski 	/* seen decrypted fragments since last fully-failed record */
843f953d33bSJakub Kicinski 	if (ctx->resync_nh_reset) {
844f953d33bSJakub Kicinski 		ctx->resync_nh_reset = 0;
845f953d33bSJakub Kicinski 		ctx->resync_nh.decrypted_failed = 1;
846f953d33bSJakub Kicinski 		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
847f953d33bSJakub Kicinski 		return;
848f953d33bSJakub Kicinski 	}
849f953d33bSJakub Kicinski 
850f953d33bSJakub Kicinski 	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
851f953d33bSJakub Kicinski 		return;
852f953d33bSJakub Kicinski 
853f953d33bSJakub Kicinski 	/* doing resync, bump the next target in case it fails */
854f953d33bSJakub Kicinski 	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
855f953d33bSJakub Kicinski 		ctx->resync_nh.decrypted_tgt *= 2;
856f953d33bSJakub Kicinski 	else
857f953d33bSJakub Kicinski 		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
858f953d33bSJakub Kicinski 
859f953d33bSJakub Kicinski 	rxm = strp_msg(skb);
860f953d33bSJakub Kicinski 
861f953d33bSJakub Kicinski 	/* head of next rec is already in, parser will sync for us */
862f953d33bSJakub Kicinski 	if (tcp_inq(sk) > rxm->full_len) {
8638538d29cSJakub Kicinski 		trace_tls_device_rx_resync_nh_schedule(sk);
864f953d33bSJakub Kicinski 		ctx->resync_nh_do_now = 1;
865f953d33bSJakub Kicinski 	} else {
866f953d33bSJakub Kicinski 		struct tls_prot_info *prot = &tls_ctx->prot_info;
867f953d33bSJakub Kicinski 		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
868f953d33bSJakub Kicinski 
869f953d33bSJakub Kicinski 		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
870f953d33bSJakub Kicinski 		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
871f953d33bSJakub Kicinski 
872f953d33bSJakub Kicinski 		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
873f953d33bSJakub Kicinski 				     rcd_sn);
874f953d33bSJakub Kicinski 	}
87538030d7cSJakub Kicinski }
8764799ac81SBoris Pismenny 
877541cc48bSJakub Kicinski static int
878ea7a9d88SGal Pressman tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
8794799ac81SBoris Pismenny {
880ea7a9d88SGal Pressman 	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
8818db44ab2SSabrina Dubroca 	const struct tls_cipher_desc *cipher_desc;
8828b3c59a7SJakub Kicinski 	int err, offset, copy, data_len, pos;
8838b3c59a7SJakub Kicinski 	struct sk_buff *skb, *skb_iter;
8844799ac81SBoris Pismenny 	struct scatterlist sg[1];
885541cc48bSJakub Kicinski 	struct strp_msg *rxm;
8864799ac81SBoris Pismenny 	char *orig_buf, *buf;
8874799ac81SBoris Pismenny 
8888db44ab2SSabrina Dubroca 	cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
8898f1d532bSSabrina Dubroca 	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
890ea7a9d88SGal Pressman 
8918b3c59a7SJakub Kicinski 	rxm = strp_msg(tls_strp_msg(sw_ctx));
8928db44ab2SSabrina Dubroca 	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
893ea7a9d88SGal Pressman 			   sk->sk_allocation);
8944799ac81SBoris Pismenny 	if (!orig_buf)
8954799ac81SBoris Pismenny 		return -ENOMEM;
8964799ac81SBoris Pismenny 	buf = orig_buf;
8974799ac81SBoris Pismenny 
8988b3c59a7SJakub Kicinski 	err = tls_strp_msg_cow(sw_ctx);
8998b3c59a7SJakub Kicinski 	if (unlikely(err))
9004799ac81SBoris Pismenny 		goto free_buf;
9018b3c59a7SJakub Kicinski 
9028b3c59a7SJakub Kicinski 	skb = tls_strp_msg(sw_ctx);
9038b3c59a7SJakub Kicinski 	rxm = strp_msg(skb);
9048b3c59a7SJakub Kicinski 	offset = rxm->offset;
9054799ac81SBoris Pismenny 
9064799ac81SBoris Pismenny 	sg_init_table(sg, 1);
9074799ac81SBoris Pismenny 	sg_set_buf(&sg[0], buf,
9088db44ab2SSabrina Dubroca 		   rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
9098db44ab2SSabrina Dubroca 	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
910aeb11ff0SJakub Kicinski 	if (err)
911aeb11ff0SJakub Kicinski 		goto free_buf;
9124799ac81SBoris Pismenny 
9134799ac81SBoris Pismenny 	/* We are interested only in the decrypted data not the auth */
914541cc48bSJakub Kicinski 	err = decrypt_skb(sk, sg);
9154799ac81SBoris Pismenny 	if (err != -EBADMSG)
9164799ac81SBoris Pismenny 		goto free_buf;
9174799ac81SBoris Pismenny 	else
9184799ac81SBoris Pismenny 		err = 0;
9194799ac81SBoris Pismenny 
9208db44ab2SSabrina Dubroca 	data_len = rxm->full_len - cipher_desc->tag;
921eb3d38d5SJakub Kicinski 
92297e1caa5SJakub Kicinski 	if (skb_pagelen(skb) > offset) {
923eb3d38d5SJakub Kicinski 		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
9244799ac81SBoris Pismenny 
925aeb11ff0SJakub Kicinski 		if (skb->decrypted) {
926aeb11ff0SJakub Kicinski 			err = skb_store_bits(skb, offset, buf, copy);
927aeb11ff0SJakub Kicinski 			if (err)
928aeb11ff0SJakub Kicinski 				goto free_buf;
929aeb11ff0SJakub Kicinski 		}
9304799ac81SBoris Pismenny 
9314799ac81SBoris Pismenny 		offset += copy;
9324799ac81SBoris Pismenny 		buf += copy;
93397e1caa5SJakub Kicinski 	}
9344799ac81SBoris Pismenny 
935eb3d38d5SJakub Kicinski 	pos = skb_pagelen(skb);
9364799ac81SBoris Pismenny 	skb_walk_frags(skb, skb_iter) {
937eb3d38d5SJakub Kicinski 		int frag_pos;
938eb3d38d5SJakub Kicinski 
939eb3d38d5SJakub Kicinski 		/* Practically all frags must belong to msg if reencrypt
940eb3d38d5SJakub Kicinski 		 * is needed with current strparser and coalescing logic,
941eb3d38d5SJakub Kicinski 		 * but strparser may "get optimized", so let's be safe.
942eb3d38d5SJakub Kicinski 		 */
943eb3d38d5SJakub Kicinski 		if (pos + skb_iter->len <= offset)
944eb3d38d5SJakub Kicinski 			goto done_with_frag;
945eb3d38d5SJakub Kicinski 		if (pos >= data_len + rxm->offset)
946eb3d38d5SJakub Kicinski 			break;
947eb3d38d5SJakub Kicinski 
948eb3d38d5SJakub Kicinski 		frag_pos = offset - pos;
949eb3d38d5SJakub Kicinski 		copy = min_t(int, skb_iter->len - frag_pos,
950eb3d38d5SJakub Kicinski 			     data_len + rxm->offset - offset);
9514799ac81SBoris Pismenny 
952aeb11ff0SJakub Kicinski 		if (skb_iter->decrypted) {
953aeb11ff0SJakub Kicinski 			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
954aeb11ff0SJakub Kicinski 			if (err)
955aeb11ff0SJakub Kicinski 				goto free_buf;
956aeb11ff0SJakub Kicinski 		}
9574799ac81SBoris Pismenny 
9584799ac81SBoris Pismenny 		offset += copy;
9594799ac81SBoris Pismenny 		buf += copy;
960eb3d38d5SJakub Kicinski done_with_frag:
961eb3d38d5SJakub Kicinski 		pos += skb_iter->len;
9624799ac81SBoris Pismenny 	}
9634799ac81SBoris Pismenny 
9644799ac81SBoris Pismenny free_buf:
9654799ac81SBoris Pismenny 	kfree(orig_buf);
9664799ac81SBoris Pismenny 	return err;
9674799ac81SBoris Pismenny }
9684799ac81SBoris Pismenny 
969541cc48bSJakub Kicinski int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
9704799ac81SBoris Pismenny {
9714799ac81SBoris Pismenny 	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
972541cc48bSJakub Kicinski 	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
973541cc48bSJakub Kicinski 	struct sk_buff *skb = tls_strp_msg(sw_ctx);
974541cc48bSJakub Kicinski 	struct strp_msg *rxm = strp_msg(skb);
975eca9bfafSJakub Kicinski 	int is_decrypted, is_encrypted;
9764799ac81SBoris Pismenny 
977eca9bfafSJakub Kicinski 	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
978eca9bfafSJakub Kicinski 		is_decrypted = skb->decrypted;
979eca9bfafSJakub Kicinski 		is_encrypted = !is_decrypted;
980eca9bfafSJakub Kicinski 	} else {
981eca9bfafSJakub Kicinski 		is_decrypted = 0;
982eca9bfafSJakub Kicinski 		is_encrypted = 0;
9834799ac81SBoris Pismenny 	}
9844799ac81SBoris Pismenny 
9859ec1c6acSJakub Kicinski 	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
9869ec1c6acSJakub Kicinski 				   tls_ctx->rx.rec_seq, rxm->full_len,
9879ec1c6acSJakub Kicinski 				   is_encrypted, is_decrypted);
9889ec1c6acSJakub Kicinski 
989c55dcdd4SMaxim Mikityanskiy 	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
990c55dcdd4SMaxim Mikityanskiy 		if (likely(is_encrypted || is_decrypted))
99171471ca3SJakub Kicinski 			return is_decrypted;
992c55dcdd4SMaxim Mikityanskiy 
993c55dcdd4SMaxim Mikityanskiy 		/* After tls_device_down disables the offload, the next SKB will
994c55dcdd4SMaxim Mikityanskiy 		 * likely have initial fragments decrypted, and final ones not
995c55dcdd4SMaxim Mikityanskiy 		 * decrypted. We need to reencrypt that single SKB.
996c55dcdd4SMaxim Mikityanskiy 		 */
997ea7a9d88SGal Pressman 		return tls_device_reencrypt(sk, tls_ctx);
998c55dcdd4SMaxim Mikityanskiy 	}
999c55dcdd4SMaxim Mikityanskiy 
1000f953d33bSJakub Kicinski 	/* Return immediately if the record is either entirely plaintext or
10014799ac81SBoris Pismenny 	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
10024799ac81SBoris Pismenny 	 * record.
10034799ac81SBoris Pismenny 	 */
1004f953d33bSJakub Kicinski 	if (is_decrypted) {
1005f953d33bSJakub Kicinski 		ctx->resync_nh_reset = 1;
100671471ca3SJakub Kicinski 		return is_decrypted;
1007f953d33bSJakub Kicinski 	}
1008f953d33bSJakub Kicinski 	if (is_encrypted) {
1009f953d33bSJakub Kicinski 		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
1010f953d33bSJakub Kicinski 		return 0;
1011f953d33bSJakub Kicinski 	}
1012f953d33bSJakub Kicinski 
1013f953d33bSJakub Kicinski 	ctx->resync_nh_reset = 1;
1014ea7a9d88SGal Pressman 	return tls_device_reencrypt(sk, tls_ctx);
10154799ac81SBoris Pismenny }
10164799ac81SBoris Pismenny 
10179e995797SJakub Kicinski static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
10189e995797SJakub Kicinski 			      struct net_device *netdev)
10199e995797SJakub Kicinski {
10209e995797SJakub Kicinski 	if (sk->sk_destruct != tls_device_sk_destruct) {
10219e995797SJakub Kicinski 		refcount_set(&ctx->refcount, 1);
10229e995797SJakub Kicinski 		dev_hold(netdev);
102394ce3b64SMaxim Mikityanskiy 		RCU_INIT_POINTER(ctx->netdev, netdev);
10249e995797SJakub Kicinski 		spin_lock_irq(&tls_device_lock);
10259e995797SJakub Kicinski 		list_add_tail(&ctx->list, &tls_device_list);
10269e995797SJakub Kicinski 		spin_unlock_irq(&tls_device_lock);
10279e995797SJakub Kicinski 
10289e995797SJakub Kicinski 		ctx->sk_destruct = sk->sk_destruct;
10298d5a49e9SJakub Kicinski 		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
10309e995797SJakub Kicinski 	}
10319e995797SJakub Kicinski }
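/* Note that the attach is idempotent: it keys on sk->sk_destruct. The
 * original destructor is preserved in ctx->sk_destruct and the
 * replacement is published with smp_store_release(), so anyone who
 * observes tls_device_sk_destruct also observes the fully initialized
 * context (refcount, netdev pointer, list linkage).
 */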
10329e995797SJakub Kicinski 
103301374079SSabrina Dubroca static struct tls_offload_context_tx *alloc_offload_ctx_tx(struct tls_context *ctx)
103401374079SSabrina Dubroca {
103501374079SSabrina Dubroca 	struct tls_offload_context_tx *offload_ctx;
103601374079SSabrina Dubroca 	__be64 rcd_sn;
103701374079SSabrina Dubroca 
10389f0c8245SSabrina Dubroca 	offload_ctx = kzalloc(sizeof(*offload_ctx), GFP_KERNEL);
103901374079SSabrina Dubroca 	if (!offload_ctx)
104001374079SSabrina Dubroca 		return NULL;
104101374079SSabrina Dubroca 
104201374079SSabrina Dubroca 	INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
104301374079SSabrina Dubroca 	INIT_LIST_HEAD(&offload_ctx->records_list);
104401374079SSabrina Dubroca 	spin_lock_init(&offload_ctx->lock);
104501374079SSabrina Dubroca 	sg_init_table(offload_ctx->sg_tx_data,
104601374079SSabrina Dubroca 		      ARRAY_SIZE(offload_ctx->sg_tx_data));
104701374079SSabrina Dubroca 
104801374079SSabrina Dubroca 	/* start at rec_seq - 1 to account for the start marker record */
104901374079SSabrina Dubroca 	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
105001374079SSabrina Dubroca 	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;
105101374079SSabrina Dubroca 
105201374079SSabrina Dubroca 	offload_ctx->ctx = ctx;
105301374079SSabrina Dubroca 
105401374079SSabrina Dubroca 	return offload_ctx;
105501374079SSabrina Dubroca }
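/* A worked example of the start-marker arithmetic: if the negotiated
 * rec_seq is 1, unacked_record_sn becomes 0, so the zero-length marker
 * record (whose end_seq the caller sets to the current write_seq)
 * occupies sequence number 0, and the first real record the device
 * sees is number 1, matching rec_seq exactly.
 */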
105601374079SSabrina Dubroca 
10574f486699SSabrina Dubroca int tls_set_device_offload(struct sock *sk)
1058e8f69799SIlya Lesokhin {
1059e8f69799SIlya Lesokhin 	struct tls_record_info *start_marker_record;
1060d80a1b9dSBoris Pismenny 	struct tls_offload_context_tx *offload_ctx;
10614f486699SSabrina Dubroca 	const struct tls_cipher_desc *cipher_desc;
1062e8f69799SIlya Lesokhin 	struct tls_crypto_info *crypto_info;
10634f486699SSabrina Dubroca 	struct tls_prot_info *prot;
1064e8f69799SIlya Lesokhin 	struct net_device *netdev;
10654f486699SSabrina Dubroca 	struct tls_context *ctx;
10664f486699SSabrina Dubroca 	char *iv, *rec_seq;
106790962b48SJakub Kicinski 	int rc;
1068e8f69799SIlya Lesokhin 
10694f486699SSabrina Dubroca 	ctx = tls_get_ctx(sk);
10704f486699SSabrina Dubroca 	prot = &ctx->prot_info;
1071e8f69799SIlya Lesokhin 
107290962b48SJakub Kicinski 	if (ctx->priv_ctx_tx)
107390962b48SJakub Kicinski 		return -EEXIST;
1074e8f69799SIlya Lesokhin 
1075b1a6f56bSZiyang Xuan 	netdev = get_netdev_for_sock(sk);
1076b1a6f56bSZiyang Xuan 	if (!netdev) {
1077b1a6f56bSZiyang Xuan 		pr_err_ratelimited("%s: netdev not found\n", __func__);
1078b1a6f56bSZiyang Xuan 		return -EINVAL;
1079b1a6f56bSZiyang Xuan 	}
1080e8f69799SIlya Lesokhin 
1081b1a6f56bSZiyang Xuan 	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
1082b1a6f56bSZiyang Xuan 		rc = -EOPNOTSUPP;
1083b1a6f56bSZiyang Xuan 		goto release_netdev;
1084e8f69799SIlya Lesokhin 	}
1085e8f69799SIlya Lesokhin 
108686029d10SSabrina Dubroca 	crypto_info = &ctx->crypto_send.info;
1087618bac45SJakub Kicinski 	if (crypto_info->version != TLS_1_2_VERSION) {
1088618bac45SJakub Kicinski 		rc = -EOPNOTSUPP;
1089b1a6f56bSZiyang Xuan 		goto release_netdev;
1090618bac45SJakub Kicinski 	}
1091618bac45SJakub Kicinski 
10928db44ab2SSabrina Dubroca 	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
10933524dd4dSSabrina Dubroca 	if (!cipher_desc || !cipher_desc->offloadable) {
109489fec474SJakub Kicinski 		rc = -EINVAL;
1095b1a6f56bSZiyang Xuan 		goto release_netdev;
109689fec474SJakub Kicinski 	}
109789fec474SJakub Kicinski 
1098b7c4f573SSabrina Dubroca 	rc = init_prot_info(prot, crypto_info, cipher_desc);
10991a074f76SSabrina Dubroca 	if (rc)
11001a074f76SSabrina Dubroca 		goto release_netdev;
11011a074f76SSabrina Dubroca 
11023524dd4dSSabrina Dubroca 	iv = crypto_info_iv(crypto_info, cipher_desc);
11033524dd4dSSabrina Dubroca 	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
11043524dd4dSSabrina Dubroca 
11058db44ab2SSabrina Dubroca 	memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
11066d5029e5SSabrina Dubroca 	memcpy(ctx->tx.rec_seq, rec_seq, cipher_desc->rec_seq);
1107e8f69799SIlya Lesokhin 
1108b1a6f56bSZiyang Xuan 	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
1109b1a6f56bSZiyang Xuan 	if (!start_marker_record) {
1110b1a6f56bSZiyang Xuan 		rc = -ENOMEM;
11111c1cb311SSabrina Dubroca 		goto release_netdev;
1112b1a6f56bSZiyang Xuan 	}
1113b1a6f56bSZiyang Xuan 
111401374079SSabrina Dubroca 	offload_ctx = alloc_offload_ctx_tx(ctx);
1115b1a6f56bSZiyang Xuan 	if (!offload_ctx) {
1116b1a6f56bSZiyang Xuan 		rc = -ENOMEM;
1117b1a6f56bSZiyang Xuan 		goto free_marker_record;
1118b1a6f56bSZiyang Xuan 	}
1119b1a6f56bSZiyang Xuan 
1120e8f69799SIlya Lesokhin 	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
1121e8f69799SIlya Lesokhin 	if (rc)
1122b1a6f56bSZiyang Xuan 		goto free_offload_ctx;
1123e8f69799SIlya Lesokhin 
1124e8f69799SIlya Lesokhin 	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
1125e8f69799SIlya Lesokhin 	start_marker_record->len = 0;
1126e8f69799SIlya Lesokhin 	start_marker_record->num_frags = 0;
1127e8f69799SIlya Lesokhin 	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
1128e8f69799SIlya Lesokhin 
1129e8f69799SIlya Lesokhin 	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
1130e8f69799SIlya Lesokhin 	ctx->push_pending_record = tls_device_push_pending_record;
1131e8f69799SIlya Lesokhin 
1132e8f69799SIlya Lesokhin 	/* TLS offload is greatly simplified if we don't send
1133e8f69799SIlya Lesokhin 	 * SKBs where only part of the payload needs to be encrypted.
1134e8f69799SIlya Lesokhin 	 * So mark the last skb in the write queue as end of record.
1135e8f69799SIlya Lesokhin 	 */
1136*1be68a87SJakub Kicinski 	tcp_write_collapse_fence(sk);
1137e8f69799SIlya Lesokhin 
1138e8f69799SIlya Lesokhin 	/* Avoid offloading if the device is down:
1139e8f69799SIlya Lesokhin 	 * we don't want to offload new flows after
1140e8f69799SIlya Lesokhin 	 * the NETDEV_DOWN event.
11413544c98aSJakub Kicinski 	 *
11423544c98aSJakub Kicinski 	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
11433544c98aSJakub Kicinski 	 * handler, thus protecting against the device going down before
11443544c98aSJakub Kicinski 	 * ctx was added to tls_device_list.
1145e8f69799SIlya Lesokhin 	 */
11463544c98aSJakub Kicinski 	down_read(&device_offload_lock);
1147e8f69799SIlya Lesokhin 	if (!(netdev->flags & IFF_UP)) {
1148e8f69799SIlya Lesokhin 		rc = -EINVAL;
11493544c98aSJakub Kicinski 		goto release_lock;
1150e8f69799SIlya Lesokhin 	}
1151e8f69799SIlya Lesokhin 
1152e8f69799SIlya Lesokhin 	ctx->priv_ctx_tx = offload_ctx;
1153e8f69799SIlya Lesokhin 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
115486029d10SSabrina Dubroca 					     &ctx->crypto_send.info,
1155e8f69799SIlya Lesokhin 					     tcp_sk(sk)->write_seq);
11568538d29cSJakub Kicinski 	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
11578538d29cSJakub Kicinski 				     tcp_sk(sk)->write_seq, rec_seq, rc);
1158e8f69799SIlya Lesokhin 	if (rc)
11593544c98aSJakub Kicinski 		goto release_lock;
1160e8f69799SIlya Lesokhin 
11614799ac81SBoris Pismenny 	tls_device_attach(ctx, sk, netdev);
11623544c98aSJakub Kicinski 	up_read(&device_offload_lock);
1163e8f69799SIlya Lesokhin 
1164ed3c9a2fSJakub Kicinski 	/* Following this assignment, tls_is_skb_tx_device_offloaded
1165e8f69799SIlya Lesokhin 	 * will return true and the context might be accessed
1166e8f69799SIlya Lesokhin 	 * by the netdev's xmit function.
1167e8f69799SIlya Lesokhin 	 */
11684799ac81SBoris Pismenny 	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
11694799ac81SBoris Pismenny 	dev_put(netdev);
117090962b48SJakub Kicinski 
117190962b48SJakub Kicinski 	return 0;
1172e8f69799SIlya Lesokhin 
1173e8f69799SIlya Lesokhin release_lock:
1174e8f69799SIlya Lesokhin 	up_read(&device_offload_lock);
1175e8f69799SIlya Lesokhin 	clean_acked_data_disable(inet_csk(sk));
1176e8f69799SIlya Lesokhin 	crypto_free_aead(offload_ctx->aead_send);
1177e8f69799SIlya Lesokhin free_offload_ctx:
1178e8f69799SIlya Lesokhin 	kfree(offload_ctx);
1179e8f69799SIlya Lesokhin 	ctx->priv_ctx_tx = NULL;
1180e8f69799SIlya Lesokhin free_marker_record:
1181e8f69799SIlya Lesokhin 	kfree(start_marker_record);
1182b1a6f56bSZiyang Xuan release_netdev:
1183b1a6f56bSZiyang Xuan 	dev_put(netdev);
1184e8f69799SIlya Lesokhin 	return rc;
1185e8f69799SIlya Lesokhin }
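/* The error unwind above mirrors setup in reverse: drop
 * device_offload_lock, disable the clean-acked hook, free the fallback
 * AEAD, free the offload context (clearing ctx->priv_ctx_tx), free the
 * start marker, and finally release the netdev reference taken by
 * get_netdev_for_sock(). The goto label chosen at each failure point
 * encodes exactly how far setup had progressed.
 */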
1186e8f69799SIlya Lesokhin 
11874799ac81SBoris Pismenny int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
11884799ac81SBoris Pismenny {
11898538d29cSJakub Kicinski 	struct tls12_crypto_info_aes_gcm_128 *info;
11904799ac81SBoris Pismenny 	struct tls_offload_context_rx *context;
11914799ac81SBoris Pismenny 	struct net_device *netdev;
11924799ac81SBoris Pismenny 	int rc = 0;
11934799ac81SBoris Pismenny 
1194618bac45SJakub Kicinski 	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
1195618bac45SJakub Kicinski 		return -EOPNOTSUPP;
1196618bac45SJakub Kicinski 
11974799ac81SBoris Pismenny 	netdev = get_netdev_for_sock(sk);
11984799ac81SBoris Pismenny 	if (!netdev) {
11994799ac81SBoris Pismenny 		pr_err_ratelimited("%s: netdev not found\n", __func__);
12003544c98aSJakub Kicinski 		return -EINVAL;
12014799ac81SBoris Pismenny 	}
12024799ac81SBoris Pismenny 
12034799ac81SBoris Pismenny 	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
12044a5cdc60SValentin Vidic 		rc = -EOPNOTSUPP;
12054799ac81SBoris Pismenny 		goto release_netdev;
12064799ac81SBoris Pismenny 	}
12074799ac81SBoris Pismenny 
12084799ac81SBoris Pismenny 	/* Avoid offloading if the device is down:
12094799ac81SBoris Pismenny 	 * we don't want to offload new flows after
12104799ac81SBoris Pismenny 	 * the NETDEV_DOWN event.
12113544c98aSJakub Kicinski 	 *
12123544c98aSJakub Kicinski 	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
12133544c98aSJakub Kicinski 	 * handler, thus protecting against the device going down before
12143544c98aSJakub Kicinski 	 * ctx was added to tls_device_list.
12154799ac81SBoris Pismenny 	 */
12163544c98aSJakub Kicinski 	down_read(&device_offload_lock);
12174799ac81SBoris Pismenny 	if (!(netdev->flags & IFF_UP)) {
12184799ac81SBoris Pismenny 		rc = -EINVAL;
12193544c98aSJakub Kicinski 		goto release_lock;
12204799ac81SBoris Pismenny 	}
12214799ac81SBoris Pismenny 
12229f0c8245SSabrina Dubroca 	context = kzalloc(sizeof(*context), GFP_KERNEL);
12234799ac81SBoris Pismenny 	if (!context) {
12244799ac81SBoris Pismenny 		rc = -ENOMEM;
12253544c98aSJakub Kicinski 		goto release_lock;
12264799ac81SBoris Pismenny 	}
1227f953d33bSJakub Kicinski 	context->resync_nh_reset = 1;
12284799ac81SBoris Pismenny 
12294799ac81SBoris Pismenny 	ctx->priv_ctx_rx = context;
1230b6a30ec9SSabrina Dubroca 	rc = tls_set_sw_offload(sk, 0);
12314799ac81SBoris Pismenny 	if (rc)
12324799ac81SBoris Pismenny 		goto release_ctx;
12334799ac81SBoris Pismenny 
12344799ac81SBoris Pismenny 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
123586029d10SSabrina Dubroca 					     &ctx->crypto_recv.info,
12364799ac81SBoris Pismenny 					     tcp_sk(sk)->copied_seq);
12378538d29cSJakub Kicinski 	info = (void *)&ctx->crypto_recv.info;
12388538d29cSJakub Kicinski 	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
12398538d29cSJakub Kicinski 				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
1240e49d268dSJakub Kicinski 	if (rc)
12414799ac81SBoris Pismenny 		goto free_sw_resources;
12424799ac81SBoris Pismenny 
12434799ac81SBoris Pismenny 	tls_device_attach(ctx, sk, netdev);
124490962b48SJakub Kicinski 	up_read(&device_offload_lock);
124590962b48SJakub Kicinski 
124690962b48SJakub Kicinski 	dev_put(netdev);
124790962b48SJakub Kicinski 
124890962b48SJakub Kicinski 	return 0;
12494799ac81SBoris Pismenny 
12504799ac81SBoris Pismenny free_sw_resources:
125162ef81d5SJakub Kicinski 	up_read(&device_offload_lock);
12524799ac81SBoris Pismenny 	tls_sw_free_resources_rx(sk);
125362ef81d5SJakub Kicinski 	down_read(&device_offload_lock);
12544799ac81SBoris Pismenny release_ctx:
12554799ac81SBoris Pismenny 	ctx->priv_ctx_rx = NULL;
12564799ac81SBoris Pismenny release_lock:
12574799ac81SBoris Pismenny 	up_read(&device_offload_lock);
12583544c98aSJakub Kicinski release_netdev:
12593544c98aSJakub Kicinski 	dev_put(netdev);
12604799ac81SBoris Pismenny 	return rc;
12614799ac81SBoris Pismenny }
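/* The free_sw_resources path above briefly drops device_offload_lock
 * around tls_sw_free_resources_rx() and retakes it before falling
 * through, so every error path still exits through the single
 * up_read() at release_lock; presumably this avoids holding the rwsem
 * across the SW-context teardown (see commit 62ef81d5 in the blame
 * column for the authoritative rationale).
 */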
12624799ac81SBoris Pismenny 
12634799ac81SBoris Pismenny void tls_device_offload_cleanup_rx(struct sock *sk)
12644799ac81SBoris Pismenny {
12654799ac81SBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
12664799ac81SBoris Pismenny 	struct net_device *netdev;
12674799ac81SBoris Pismenny 
12684799ac81SBoris Pismenny 	down_read(&device_offload_lock);
126994ce3b64SMaxim Mikityanskiy 	netdev = rcu_dereference_protected(tls_ctx->netdev,
127094ce3b64SMaxim Mikityanskiy 					   lockdep_is_held(&device_offload_lock));
12714799ac81SBoris Pismenny 	if (!netdev)
12724799ac81SBoris Pismenny 		goto out;
12734799ac81SBoris Pismenny 
12744799ac81SBoris Pismenny 	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
12754799ac81SBoris Pismenny 					TLS_OFFLOAD_CTX_DIR_RX);
12764799ac81SBoris Pismenny 
12774799ac81SBoris Pismenny 	if (tls_ctx->tx_conf != TLS_HW) {
12784799ac81SBoris Pismenny 		dev_put(netdev);
127994ce3b64SMaxim Mikityanskiy 		rcu_assign_pointer(tls_ctx->netdev, NULL);
1280025cc2fbSMaxim Mikityanskiy 	} else {
1281025cc2fbSMaxim Mikityanskiy 		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
12824799ac81SBoris Pismenny 	}
12834799ac81SBoris Pismenny out:
12844799ac81SBoris Pismenny 	up_read(&device_offload_lock);
12854799ac81SBoris Pismenny 	tls_sw_release_resources_rx(sk);
12864799ac81SBoris Pismenny }
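/* The netdev reference is shared between the RX and TX offload state.
 * If TX is still in TLS_HW mode, the ref and ctx->netdev must survive
 * for the TX side, so only TLS_RX_DEV_CLOSED is set here;
 * tls_device_down() checks that bit to avoid a second
 * tls_dev_del(..., TLS_OFFLOAD_CTX_DIR_RX) call.
 */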
12874799ac81SBoris Pismenny 
1288e8f69799SIlya Lesokhin static int tls_device_down(struct net_device *netdev)
1289e8f69799SIlya Lesokhin {
1290e8f69799SIlya Lesokhin 	struct tls_context *ctx, *tmp;
1291e8f69799SIlya Lesokhin 	unsigned long flags;
1292e8f69799SIlya Lesokhin 	LIST_HEAD(list);
1293e8f69799SIlya Lesokhin 
1294e8f69799SIlya Lesokhin 	/* Request a write lock to block new offload attempts */
1295e8f69799SIlya Lesokhin 	down_write(&device_offload_lock);
1296e8f69799SIlya Lesokhin 
1297e8f69799SIlya Lesokhin 	spin_lock_irqsave(&tls_device_lock, flags);
1298e8f69799SIlya Lesokhin 	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
129994ce3b64SMaxim Mikityanskiy 		struct net_device *ctx_netdev =
130094ce3b64SMaxim Mikityanskiy 			rcu_dereference_protected(ctx->netdev,
130194ce3b64SMaxim Mikityanskiy 						  lockdep_is_held(&device_offload_lock));
130294ce3b64SMaxim Mikityanskiy 
130394ce3b64SMaxim Mikityanskiy 		if (ctx_netdev != netdev ||
1304e8f69799SIlya Lesokhin 		    !refcount_inc_not_zero(&ctx->refcount))
1305e8f69799SIlya Lesokhin 			continue;
1306e8f69799SIlya Lesokhin 
1307e8f69799SIlya Lesokhin 		list_move(&ctx->list, &list);
1308e8f69799SIlya Lesokhin 	}
1309e8f69799SIlya Lesokhin 	spin_unlock_irqrestore(&tls_device_lock, flags);
1310e8f69799SIlya Lesokhin 
1311e8f69799SIlya Lesokhin 	list_for_each_entry_safe(ctx, tmp, &list, list) {
1312c55dcdd4SMaxim Mikityanskiy 		/* Stop offloaded TX and switch to the fallback.
1313ed3c9a2fSJakub Kicinski 		 * tls_is_skb_tx_device_offloaded will return false.
1314c55dcdd4SMaxim Mikityanskiy 		 */
1315c55dcdd4SMaxim Mikityanskiy 		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
1316c55dcdd4SMaxim Mikityanskiy 
1317c55dcdd4SMaxim Mikityanskiy 		/* Stop the RX and TX resync.
1318c55dcdd4SMaxim Mikityanskiy 		 * tls_dev_resync must not be called after tls_dev_del.
1319c55dcdd4SMaxim Mikityanskiy 		 */
132094ce3b64SMaxim Mikityanskiy 		rcu_assign_pointer(ctx->netdev, NULL);
1321c55dcdd4SMaxim Mikityanskiy 
1322c55dcdd4SMaxim Mikityanskiy 		/* Start skipping the RX resync logic completely. */
1323c55dcdd4SMaxim Mikityanskiy 		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
1324c55dcdd4SMaxim Mikityanskiy 
1325c55dcdd4SMaxim Mikityanskiy 		/* Sync with inflight packets. After this point:
1326c55dcdd4SMaxim Mikityanskiy 		 * TX: no non-encrypted packets will be passed to the driver.
1327c55dcdd4SMaxim Mikityanskiy 		 * RX: resync requests from the driver will be ignored.
1328c55dcdd4SMaxim Mikityanskiy 		 */
1329c55dcdd4SMaxim Mikityanskiy 		synchronize_net();
1330c55dcdd4SMaxim Mikityanskiy 
1331c55dcdd4SMaxim Mikityanskiy 		/* Release the offload context on the driver side. */
13324799ac81SBoris Pismenny 		if (ctx->tx_conf == TLS_HW)
1333e8f69799SIlya Lesokhin 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1334e8f69799SIlya Lesokhin 							TLS_OFFLOAD_CTX_DIR_TX);
1335025cc2fbSMaxim Mikityanskiy 		if (ctx->rx_conf == TLS_HW &&
1336025cc2fbSMaxim Mikityanskiy 		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
13374799ac81SBoris Pismenny 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
13384799ac81SBoris Pismenny 							TLS_OFFLOAD_CTX_DIR_RX);
1339e8f69799SIlya Lesokhin 
1340c55dcdd4SMaxim Mikityanskiy 		dev_put(netdev);
1341c55dcdd4SMaxim Mikityanskiy 
1342c55dcdd4SMaxim Mikityanskiy 		/* Move the context to a separate list for two reasons:
1343c55dcdd4SMaxim Mikityanskiy 		 * 1. When the context is deallocated, list_del is called.
1344c55dcdd4SMaxim Mikityanskiy 		 * 2. It's no longer an offloaded context, so we don't want to
1345c55dcdd4SMaxim Mikityanskiy 		 *    run offload-specific code on this context.
1346c55dcdd4SMaxim Mikityanskiy 		 */
1347c55dcdd4SMaxim Mikityanskiy 		spin_lock_irqsave(&tls_device_lock, flags);
1348c55dcdd4SMaxim Mikityanskiy 		list_move_tail(&ctx->list, &tls_device_down_list);
1349c55dcdd4SMaxim Mikityanskiy 		spin_unlock_irqrestore(&tls_device_lock, flags);
1350c55dcdd4SMaxim Mikityanskiy 
1351c55dcdd4SMaxim Mikityanskiy 	/* Device contexts for RX and TX will be freed on sk_destruct
1352c55dcdd4SMaxim Mikityanskiy 		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
13533740651bSMaxim Mikityanskiy 		 * Now release the ref taken above.
1354c55dcdd4SMaxim Mikityanskiy 		 */
1355f6336724SMaxim Mikityanskiy 		if (refcount_dec_and_test(&ctx->refcount)) {
1356f6336724SMaxim Mikityanskiy 			/* sk_destruct ran after tls_device_down took a ref, and
1357f6336724SMaxim Mikityanskiy 			 * it returned early. Complete the destruction here.
1358f6336724SMaxim Mikityanskiy 			 */
1359f6336724SMaxim Mikityanskiy 			list_del(&ctx->list);
13603740651bSMaxim Mikityanskiy 			tls_device_free_ctx(ctx);
1361e8f69799SIlya Lesokhin 		}
1362f6336724SMaxim Mikityanskiy 	}
1363e8f69799SIlya Lesokhin 
1364e8f69799SIlya Lesokhin 	up_write(&device_offload_lock);
1365e8f69799SIlya Lesokhin 
13667adc91e0STariq Toukan 	flush_workqueue(destruct_wq);
1367e8f69799SIlya Lesokhin 
1368e8f69799SIlya Lesokhin 	return NOTIFY_DONE;
1369e8f69799SIlya Lesokhin }
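/* The ordering above is load-bearing: the SW fallback xmit hook and
 * the NULLed ctx->netdev are published first, synchronize_net() then
 * flushes in-flight readers, and only afterwards is tls_dev_del()
 * invoked, so the driver can never see a resync request or a
 * cleartext skb once its context has been released.
 */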
1370e8f69799SIlya Lesokhin 
1371e8f69799SIlya Lesokhin static int tls_dev_event(struct notifier_block *this, unsigned long event,
1372e8f69799SIlya Lesokhin 			 void *ptr)
1373e8f69799SIlya Lesokhin {
1374e8f69799SIlya Lesokhin 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1375e8f69799SIlya Lesokhin 
1376c3f4a6c3SJakub Kicinski 	if (!dev->tlsdev_ops &&
1377c3f4a6c3SJakub Kicinski 	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
1378e8f69799SIlya Lesokhin 		return NOTIFY_DONE;
1379e8f69799SIlya Lesokhin 
1380e8f69799SIlya Lesokhin 	switch (event) {
1381e8f69799SIlya Lesokhin 	case NETDEV_REGISTER:
1382e8f69799SIlya Lesokhin 	case NETDEV_FEAT_CHANGE:
13834e5a7332STariq Toukan 		if (netif_is_bond_master(dev))
13844e5a7332STariq Toukan 			return NOTIFY_DONE;
13854799ac81SBoris Pismenny 		if ((dev->features & NETIF_F_HW_TLS_RX) &&
1386eeb2efafSJakub Kicinski 		    !dev->tlsdev_ops->tls_dev_resync)
13874799ac81SBoris Pismenny 			return NOTIFY_BAD;
13884799ac81SBoris Pismenny 
1389e8f69799SIlya Lesokhin 	if (dev->tlsdev_ops &&
1390e8f69799SIlya Lesokhin 		     dev->tlsdev_ops->tls_dev_add &&
1391e8f69799SIlya Lesokhin 		     dev->tlsdev_ops->tls_dev_del)
1392e8f69799SIlya Lesokhin 			return NOTIFY_DONE;
1393e8f69799SIlya Lesokhin 		else
1394e8f69799SIlya Lesokhin 			return NOTIFY_BAD;
1395e8f69799SIlya Lesokhin 	case NETDEV_DOWN:
1396e8f69799SIlya Lesokhin 		return tls_device_down(dev);
1397e8f69799SIlya Lesokhin 	}
1398e8f69799SIlya Lesokhin 	return NOTIFY_DONE;
1399e8f69799SIlya Lesokhin }
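/* A minimal, hypothetical sketch of the driver-side ops that
 * tls_dev_event() validates above. The foo_* names are placeholders,
 * not a real driver, and the member signatures are assumed to match
 * struct tlsdev_ops in include/net/tls.h at this revision.
 */
static int foo_tls_dev_add(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn)
{
	/* Program key material and the starting TCP sequence into HW. */
	return 0;
}

static void foo_tls_dev_del(struct net_device *netdev,
			    struct tls_context *tls_ctx,
			    enum tls_offload_ctx_dir direction)
{
	/* Release HW state; may also be reached via tls_device_down(). */
}

static int foo_tls_dev_resync(struct net_device *netdev, struct sock *sk,
			      u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction)
{
	/* Point the HW record tracker at (seq, rcd_sn). */
	return 0;
}

static const struct tlsdev_ops foo_tlsdev_ops = {
	.tls_dev_add	= foo_tls_dev_add,
	.tls_dev_del	= foo_tls_dev_del,
	.tls_dev_resync	= foo_tls_dev_resync, /* required with NETIF_F_HW_TLS_RX */
};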
1400e8f69799SIlya Lesokhin 
1401e8f69799SIlya Lesokhin static struct notifier_block tls_dev_notifier = {
1402e8f69799SIlya Lesokhin 	.notifier_call	= tls_dev_event,
1403e8f69799SIlya Lesokhin };
1404e8f69799SIlya Lesokhin 
14053d8c51b2STariq Toukan int __init tls_device_init(void)
1406e8f69799SIlya Lesokhin {
14077adc91e0STariq Toukan 	int err;
14087adc91e0STariq Toukan 
14096b47808fSJakub Kicinski 	dummy_page = alloc_page(GFP_KERNEL);
14106b47808fSJakub Kicinski 	if (!dummy_page)
14117adc91e0STariq Toukan 		return -ENOMEM;
14127adc91e0STariq Toukan 
14136b47808fSJakub Kicinski 	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
14146b47808fSJakub Kicinski 	if (!destruct_wq) {
14156b47808fSJakub Kicinski 		err = -ENOMEM;
14166b47808fSJakub Kicinski 		goto err_free_dummy;
14176b47808fSJakub Kicinski 	}
14186b47808fSJakub Kicinski 
14197adc91e0STariq Toukan 	err = register_netdevice_notifier(&tls_dev_notifier);
14207adc91e0STariq Toukan 	if (err)
14216b47808fSJakub Kicinski 		goto err_destroy_wq;
14227adc91e0STariq Toukan 
14236b47808fSJakub Kicinski 	return 0;
14246b47808fSJakub Kicinski 
14256b47808fSJakub Kicinski err_destroy_wq:
14266b47808fSJakub Kicinski 	destroy_workqueue(destruct_wq);
14276b47808fSJakub Kicinski err_free_dummy:
14286b47808fSJakub Kicinski 	put_page(dummy_page);
14297adc91e0STariq Toukan 	return err;
1430e8f69799SIlya Lesokhin }
1431e8f69799SIlya Lesokhin 
1432e8f69799SIlya Lesokhin void __exit tls_device_cleanup(void)
1433e8f69799SIlya Lesokhin {
1434e8f69799SIlya Lesokhin 	unregister_netdevice_notifier(&tls_dev_notifier);
14357adc91e0STariq Toukan 	destroy_workqueue(destruct_wq);
1436494bc1d2SJakub Kicinski 	clean_acked_data_flush();
14376b47808fSJakub Kicinski 	put_page(dummy_page);
1438e8f69799SIlya Lesokhin }
1439