xref: /linux/net/ipv4/tcp_sigpool.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
18c73b263SDmitry Safonov // SPDX-License-Identifier: GPL-2.0-or-later
28c73b263SDmitry Safonov 
38c73b263SDmitry Safonov #include <crypto/hash.h>
48c73b263SDmitry Safonov #include <linux/cpu.h>
58c73b263SDmitry Safonov #include <linux/kref.h>
68c73b263SDmitry Safonov #include <linux/module.h>
78c73b263SDmitry Safonov #include <linux/mutex.h>
88c73b263SDmitry Safonov #include <linux/percpu.h>
98c73b263SDmitry Safonov #include <linux/workqueue.h>
108c73b263SDmitry Safonov #include <net/tcp.h>
118c73b263SDmitry Safonov 
/* Size of the per-CPU scratch areas; grows monotonically, only written
 * under cpool_mutex (see sigpool_reserve_scratch()).
 */
static size_t __scratch_size;

/* Per-CPU scratch buffer for hashing plus a BH-scoped local lock that
 * serializes its users (relevant on PREEMPT_RT where softirq handlers
 * can be preempted).  @pad is RCU-managed so the slow path can swap in
 * a larger buffer while fast-path readers are active.
 */
struct sigpool_scratch {
	local_lock_t bh_lock;
	void __rcu *pad;
};

static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
218c73b263SDmitry Safonov 
/* One pool slot: a reference ahash transform that tcp_sigpool_start()
 * clones per use.  A slot is free when @alg is NULL.
 */
struct sigpool_entry {
	struct crypto_ahash	*hash;
	const char		*alg;	/* kstrdup()ed algorithm name */
	struct kref		kref;	/* users of this pool entry */
	uint16_t		needs_key:1,	/* alg requires setkey() first */
				reserved:15;
};

#define CPOOL_SIZE (PAGE_SIZE / sizeof(struct sigpool_entry))
static struct sigpool_entry cpool[CPOOL_SIZE];
static unsigned int cpool_populated;	/* highest used slot + 1; under cpool_mutex */
static DEFINE_MUTEX(cpool_mutex);
348c73b263SDmitry Safonov 
358c73b263SDmitry Safonov /* Slow-path */
368c73b263SDmitry Safonov struct scratches_to_free {
378c73b263SDmitry Safonov 	struct rcu_head rcu;
388c73b263SDmitry Safonov 	unsigned int cnt;
398c73b263SDmitry Safonov 	void *scratches[];
408c73b263SDmitry Safonov };
418c73b263SDmitry Safonov 
free_old_scratches(struct rcu_head * head)428c73b263SDmitry Safonov static void free_old_scratches(struct rcu_head *head)
438c73b263SDmitry Safonov {
448c73b263SDmitry Safonov 	struct scratches_to_free *stf;
458c73b263SDmitry Safonov 
468c73b263SDmitry Safonov 	stf = container_of(head, struct scratches_to_free, rcu);
478c73b263SDmitry Safonov 	while (stf->cnt--)
488c73b263SDmitry Safonov 		kfree(stf->scratches[stf->cnt]);
498c73b263SDmitry Safonov 	kfree(stf);
508c73b263SDmitry Safonov }
518c73b263SDmitry Safonov 
/**
 * sigpool_reserve_scratch - re-allocates scratch buffer, slow-path
 * @size: request size for the scratch/temp buffer
 *
 * Grows every per-CPU scratch area to at least @size bytes.  Replaced
 * buffers that online CPUs might still be reading are batched and freed
 * only after an RCU grace period.  Returns 0 on success or -ENOMEM; on
 * failure, CPUs already resized keep their new (larger) buffers, which
 * is safe because __scratch_size is raised only when all CPUs succeeded.
 */
static int sigpool_reserve_scratch(size_t size)
{
	struct scratches_to_free *stf;
	size_t stf_sz = struct_size(stf, scratches, num_possible_cpus());
	int cpu, err = 0;

	lockdep_assert_held(&cpool_mutex);
	if (__scratch_size >= size)
		return 0;

	stf = kmalloc(stf_sz, GFP_KERNEL);
	if (!stf)
		return -ENOMEM;
	stf->cnt = 0;

	size = max(size, __scratch_size);
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		void *scratch, *old_scratch;

		scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
		if (!scratch) {
			err = -ENOMEM;
			break;
		}

		old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
					scratch, lockdep_is_held(&cpool_mutex));
		/* Offline CPUs cannot have RCU-BH readers of the old
		 * buffer, so it can be freed immediately.
		 */
		if (!cpu_online(cpu) || !old_scratch) {
			kfree(old_scratch);
			continue;
		}
		stf->scratches[stf->cnt++] = old_scratch;
	}
	cpus_read_unlock();
	if (!err)
		__scratch_size = size;

	/* Free the collected old buffers after a grace period; also
	 * frees @stf itself even when the batch is empty.
	 */
	call_rcu(&stf->rcu, free_old_scratches);
	return err;
}
978c73b263SDmitry Safonov 
sigpool_scratch_free(void)988c73b263SDmitry Safonov static void sigpool_scratch_free(void)
998c73b263SDmitry Safonov {
1008c73b263SDmitry Safonov 	int cpu;
1018c73b263SDmitry Safonov 
1028c73b263SDmitry Safonov 	for_each_possible_cpu(cpu)
103*585aa621SSebastian Andrzej Siewior 		kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
1048c73b263SDmitry Safonov 					  NULL, lockdep_is_held(&cpool_mutex)));
1058c73b263SDmitry Safonov 	__scratch_size = 0;
1068c73b263SDmitry Safonov }
1078c73b263SDmitry Safonov 
/* Check that @hash supports crypto_clone_ahash(), which the fast path
 * (tcp_sigpool_start()) depends on.  Returns 0 if cloning works,
 * a negative error code otherwise.
 */
static int __cpool_try_clone(struct crypto_ahash *hash)
{
	struct crypto_ahash *clone = crypto_clone_ahash(hash);

	if (IS_ERR(clone))
		return PTR_ERR(clone);

	crypto_free_ahash(clone);
	return 0;
}
1198c73b263SDmitry Safonov 
__cpool_alloc_ahash(struct sigpool_entry * e,const char * alg)1208c73b263SDmitry Safonov static int __cpool_alloc_ahash(struct sigpool_entry *e, const char *alg)
1218c73b263SDmitry Safonov {
1228c73b263SDmitry Safonov 	struct crypto_ahash *cpu0_hash;
1238c73b263SDmitry Safonov 	int ret;
1248c73b263SDmitry Safonov 
1258c73b263SDmitry Safonov 	e->alg = kstrdup(alg, GFP_KERNEL);
1268c73b263SDmitry Safonov 	if (!e->alg)
1278c73b263SDmitry Safonov 		return -ENOMEM;
1288c73b263SDmitry Safonov 
1298c73b263SDmitry Safonov 	cpu0_hash = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
1308c73b263SDmitry Safonov 	if (IS_ERR(cpu0_hash)) {
1318c73b263SDmitry Safonov 		ret = PTR_ERR(cpu0_hash);
1328c73b263SDmitry Safonov 		goto out_free_alg;
1338c73b263SDmitry Safonov 	}
1348c73b263SDmitry Safonov 
1358c73b263SDmitry Safonov 	e->needs_key = crypto_ahash_get_flags(cpu0_hash) & CRYPTO_TFM_NEED_KEY;
1368c73b263SDmitry Safonov 
1378c73b263SDmitry Safonov 	ret = __cpool_try_clone(cpu0_hash);
1388c73b263SDmitry Safonov 	if (ret)
1398c73b263SDmitry Safonov 		goto out_free_cpu0_hash;
1408c73b263SDmitry Safonov 	e->hash = cpu0_hash;
1418c73b263SDmitry Safonov 	kref_init(&e->kref);
1428c73b263SDmitry Safonov 	return 0;
1438c73b263SDmitry Safonov 
1448c73b263SDmitry Safonov out_free_cpu0_hash:
1458c73b263SDmitry Safonov 	crypto_free_ahash(cpu0_hash);
1468c73b263SDmitry Safonov out_free_alg:
1478c73b263SDmitry Safonov 	kfree(e->alg);
1488c73b263SDmitry Safonov 	e->alg = NULL;
1498c73b263SDmitry Safonov 	return ret;
1508c73b263SDmitry Safonov }
1518c73b263SDmitry Safonov 
/**
 * tcp_sigpool_alloc_ahash - allocates pool for ahash requests
 * @alg: name of async hash algorithm
 * @scratch_size: reserve a tcp_sigpool::scratch buffer of this size
 *
 * Returns a non-negative pool id on success or a negative errno.
 * An existing entry for @alg is re-used (its refcount bumped);
 * otherwise the first free slot is populated.  Serialized by
 * cpool_mutex.
 */
int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size)
{
	int i, ret;

	/* slow-path */
	mutex_lock(&cpool_mutex);
	ret = sigpool_reserve_scratch(scratch_size);
	if (ret)
		goto out;
	/* Re-use an already-populated entry for this algorithm. */
	for (i = 0; i < cpool_populated; i++) {
		if (!cpool[i].alg)
			continue;
		if (strcmp(cpool[i].alg, alg))
			continue;

		/* pairs with tcp_sigpool_release() */
		if (!kref_get_unless_zero(&cpool[i].kref))
			/* Entry hit zero but cleanup work hasn't freed it
			 * yet (we hold cpool_mutex): revive it.
			 */
			kref_init(&cpool[i].kref);
		ret = i;
		goto out;
	}

	/* No match: claim the first free slot. */
	for (i = 0; i < cpool_populated; i++) {
		if (!cpool[i].alg)
			break;
	}
	if (i >= CPOOL_SIZE) {
		ret = -ENOSPC;
		goto out;
	}

	ret = __cpool_alloc_ahash(&cpool[i], alg);
	if (!ret) {
		ret = i;
		if (i == cpool_populated)
			cpool_populated++;
	}
out:
	mutex_unlock(&cpool_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_sigpool_alloc_ahash);
1998c73b263SDmitry Safonov 
/* Release all resources of a pool slot; the trailing memset marks it
 * free again (alg == NULL) and clears needs_key/kref.
 */
static void __cpool_free_entry(struct sigpool_entry *e)
{
	crypto_free_ahash(e->hash);
	kfree(e->alg);
	memset(e, 0, sizeof(*e));
}
2068c73b263SDmitry Safonov 
/* Deferred teardown (process context): free every populated entry whose
 * refcount has dropped to zero.  The shared scratch area is released
 * only when no entry at all remains in use.
 */
static void cpool_cleanup_work_cb(struct work_struct *work)
{
	bool free_scratch = true;
	unsigned int i;

	mutex_lock(&cpool_mutex);
	for (i = 0; i < cpool_populated; i++) {
		/* Entry revived (or still used) since the work was
		 * scheduled — keep it and keep the scratch area.
		 */
		if (kref_read(&cpool[i].kref) > 0) {
			free_scratch = false;
			continue;
		}
		if (!cpool[i].alg)
			continue;
		__cpool_free_entry(&cpool[i]);
	}
	if (free_scratch)
		sigpool_scratch_free();
	mutex_unlock(&cpool_mutex);
}
2268c73b263SDmitry Safonov 
static DECLARE_WORK(cpool_cleanup_work, cpool_cleanup_work_cb);
/* kref release callback: defer the actual freeing to process context,
 * since tcp_sigpool_release() may run in atomic context.
 */
static void cpool_schedule_cleanup(struct kref *kref)
{
	schedule_work(&cpool_cleanup_work);
}
2328c73b263SDmitry Safonov 
/**
 * tcp_sigpool_release - decreases number of users for a pool. If it was
 * the last user of the pool, releases any memory that was consumed.
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 */
void tcp_sigpool_release(unsigned int id)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return;

	/* slow-path */
	kref_put(&cpool[id].kref, cpool_schedule_cleanup);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_release);
2478c73b263SDmitry Safonov 
/**
 * tcp_sigpool_get - increases number of users (refcounter) for a pool
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 *
 * The caller must already hold a reference on @id (kref_get() would be
 * invalid on a zero refcount).
 */
void tcp_sigpool_get(unsigned int id)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return;
	kref_get(&cpool[id].kref);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_get);
2598c73b263SDmitry Safonov 
/**
 * tcp_sigpool_start - get a ready-to-use, per-call ahash request
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 * @c: filled with the request and the per-CPU scratch buffer on success
 *
 * On success returns 0 with BH disabled and the per-CPU scratch locked;
 * the caller must pair it with tcp_sigpool_end().  On failure all locks
 * are dropped before returning a negative errno.
 */
int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RCU_BH)
{
	struct crypto_ahash *hash;

	rcu_read_lock_bh();
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) {
		rcu_read_unlock_bh();
		return -EINVAL;
	}

	/* Clone the reference transform for this call. */
	hash = crypto_clone_ahash(cpool[id].hash);
	if (IS_ERR(hash)) {
		rcu_read_unlock_bh();
		return PTR_ERR(hash);
	}

	/* GFP_ATOMIC: BH is disabled, sleeping is not allowed here. */
	c->req = ahash_request_alloc(hash, GFP_ATOMIC);
	if (!c->req) {
		crypto_free_ahash(hash);
		rcu_read_unlock_bh();
		return -ENOMEM;
	}
	ahash_request_set_callback(c->req, 0, NULL, NULL);

	/* Pairs with tcp_sigpool_reserve_scratch(), scratch area is
	 * valid (allocated) until tcp_sigpool_end().
	 */
	local_lock_nested_bh(&sigpool_scratch.bh_lock);
	c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad));
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_sigpool_start);
2928c73b263SDmitry Safonov 
/* Counterpart of tcp_sigpool_start(): drop the scratch lock and RCU-BH
 * (in the reverse order they were taken), then free the per-call
 * request and its cloned transform.
 */
void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH)
{
	struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);

	local_unlock_nested_bh(&sigpool_scratch.bh_lock);
	rcu_read_unlock_bh();
	ahash_request_free(c->req);
	crypto_free_ahash(hash);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_end);
3038c73b263SDmitry Safonov 
/**
 * tcp_sigpool_algo - return algorithm of tcp_sigpool
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 * @buf: buffer to return name of algorithm
 * @buf_len: size of @buf
 *
 * NOTE(review): the return type is size_t, yet -EINVAL and strscpy()'s
 * -E2BIG (on truncation) are negative — callers presumably compare
 * against (size_t)-errno or guarantee buf_len is large enough; verify
 * at the call sites.
 */
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return -EINVAL;

	return strscpy(buf, cpool[id].alg, buf_len);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_algo);
3188c73b263SDmitry Safonov 
/**
 * tcp_sigpool_hash_skb_data - hash data in skb with initialized tcp_sigpool
 * @hp: tcp_sigpool pointer
 * @skb: buffer to add sign for
 * @header_len: TCP header length for this segment
 *
 * Feeds the skb payload (linear head past @header_len, then page frags,
 * then nested frag skbs recursively) into @hp's ahash request.  Returns
 * 0 on success, 1 on any crypto_ahash_update() failure.
 */
int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
			      const struct sk_buff *skb,
			      unsigned int header_len)
{
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	const struct tcphdr *tp = tcp_hdr(skb);
	struct ahash_request *req = hp->req;
	struct sk_buff *frag_iter;
	struct scatterlist sg;
	unsigned int i;

	sg_init_table(&sg, 1);

	/* Linear part of the skb, skipping the TCP header. */
	sg_set_buf(&sg, ((u8 *)tp) + header_len, head_data_len);
	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
	if (crypto_ahash_update(req))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const skb_frag_t *f = &shi->frags[i];
		unsigned int offset = skb_frag_off(f);
		struct page *page;

		/* Normalize frag offsets that span page boundaries. */
		page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
		sg_set_page(&sg, page, skb_frag_size(f), offset_in_page(offset));
		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
		if (crypto_ahash_update(req))
			return 1;
	}

	/* Recurse into the frag list; nested skbs have no TCP header. */
	skb_walk_frags(skb, frag_iter)
		if (tcp_sigpool_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_sigpool_hash_skb_data);
3648c73b263SDmitry Safonov 
3658c73b263SDmitry Safonov MODULE_LICENSE("GPL");
3668c73b263SDmitry Safonov MODULE_DESCRIPTION("Per-CPU pool of crypto requests");
367