xref: /linux/arch/arm64/crypto/polyval-ce-glue.c (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Glue code for POLYVAL using ARMv8 Crypto Extensions
 *
 * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
 * Copyright (c) 2009 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 * Copyright 2021 Google LLC
 */

/*
 * Glue code based on ghash-clmulni-intel_glue.c.
 *
 * This implementation of POLYVAL uses Montgomery multiplication accelerated by
 * ARMv8 Crypto Extensions instructions to implement the finite field
 * operations.
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/polyval.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <asm/neon.h>
#include <asm/simd.h>

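/*
 * Number of key powers (h^1 through h^8) precomputed at setkey time.
 * Having several powers available lets the NEON routine fold multiple
 * blocks per iteration instead of multiplying block by block.
 */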
#define NUM_KEY_POWERS	8

struct polyval_tfm_ctx {
	/*
	 * These powers must be in the order h^8, ..., h^1.
	 */
	u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE];
};

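/*
 * Per-request state: buffer doubles as the running accumulator, and
 * bytes counts how many more input bytes are needed to complete the
 * block currently being XORed into it.
 */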
struct polyval_desc_ctx {
	u8 buffer[POLYVAL_BLOCK_SIZE];
	u32 bytes;
};

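/*
 * PMULL-based routines implemented in the accompanying ARMv8 Crypto
 * Extensions assembly (polyval-ce-core.S); they may only be called
 * between kernel_neon_begin() and kernel_neon_end().
 */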
asmlinkage void pmull_polyval_update(const struct polyval_tfm_ctx *keys,
	const u8 *in, size_t nblocks, u8 *accumulator);
asmlinkage void pmull_polyval_mul(u8 *op1, const u8 *op2);

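/*
 * Fold nblocks blocks of input into the accumulator: use the PMULL
 * assembly when NEON is usable in this context, otherwise fall back to
 * the generic C helper, which only needs the lowest key power (h^1).
 */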
static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
	const u8 *in, size_t nblocks, u8 *accumulator)
{
	if (likely(crypto_simd_usable())) {
		kernel_neon_begin();
		pmull_polyval_update(keys, in, nblocks, accumulator);
		kernel_neon_end();
	} else {
		polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in,
			nblocks, accumulator);
	}
}

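/* A single GF(2^128) multiplication, with the same SIMD/fallback split. */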
static void internal_polyval_mul(u8 *op1, const u8 *op2)
{
	if (likely(crypto_simd_usable())) {
		kernel_neon_begin();
		pmull_polyval_mul(op1, op2);
		kernel_neon_end();
	} else {
		polyval_mul_non4k(op1, op2);
	}
}

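/*
 * Store h in the last slot, then fill the remaining slots by repeated
 * multiplication so that key_powers[] ends up ordered h^8, ..., h^1 as
 * required above.
 */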
static int polyval_arm64_setkey(struct crypto_shash *tfm,
			const u8 *key, unsigned int keylen)
{
	struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm);
	int i;

	if (keylen != POLYVAL_BLOCK_SIZE)
		return -EINVAL;

	memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE);

	for (i = NUM_KEY_POWERS-2; i >= 0; i--) {
		memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE);
		internal_polyval_mul(tctx->key_powers[i],
				     tctx->key_powers[i+1]);
	}

	return 0;
}

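/* Begin each request with a zeroed accumulator and no buffered bytes. */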
static int polyval_arm64_init(struct shash_desc *desc)
{
	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);

	memset(dctx, 0, sizeof(*dctx));

	return 0;
}

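/*
 * Absorb srclen bytes: complete and fold any partially filled block
 * first, pass full blocks to the bulk routine in chunks of at most
 * 4 KiB so that rescheduling can happen between chunks, then XOR any
 * trailing partial block into the accumulator for a later fold.
 */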
static int polyval_arm64_update(struct shash_desc *desc,
			 const u8 *src, unsigned int srclen)
{
	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
	const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	u8 *pos;
	unsigned int nblocks;
	unsigned int n;

	if (dctx->bytes) {
		n = min(srclen, dctx->bytes);
		pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes;

		dctx->bytes -= n;
		srclen -= n;

		while (n--)
			*pos++ ^= *src++;

		if (!dctx->bytes)
			internal_polyval_mul(dctx->buffer,
					    tctx->key_powers[NUM_KEY_POWERS-1]);
	}

	while (srclen >= POLYVAL_BLOCK_SIZE) {
		/* allow rescheduling every 4K bytes */
		nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE;
		internal_polyval_update(tctx, src, nblocks, dctx->buffer);
		srclen -= nblocks * POLYVAL_BLOCK_SIZE;
		src += nblocks * POLYVAL_BLOCK_SIZE;
	}

	if (srclen) {
		dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
		pos = dctx->buffer;
		while (srclen--)
			*pos++ ^= *src++;
	}

	return 0;
}

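/*
 * Fold in any pending partial block with one final multiplication, then
 * emit the accumulator as the digest.
 */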
static int polyval_arm64_final(struct shash_desc *desc, u8 *dst)
{
	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
	const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);

	if (dctx->bytes) {
		internal_polyval_mul(dctx->buffer,
				     tctx->key_powers[NUM_KEY_POWERS-1]);
	}

	memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE);

	return 0;
}

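/*
 * cra_priority 200 ranks this driver above the generic C "polyval"
 * implementation, so it is preferred whenever this module is loaded.
 */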
static struct shash_alg polyval_alg = {
	.digestsize	= POLYVAL_DIGEST_SIZE,
	.init		= polyval_arm64_init,
	.update		= polyval_arm64_update,
	.final		= polyval_arm64_final,
	.setkey		= polyval_arm64_setkey,
	.descsize	= sizeof(struct polyval_desc_ctx),
	.base		= {
		.cra_name		= "polyval",
		.cra_driver_name	= "polyval-ce",
		.cra_priority		= 200,
		.cra_blocksize		= POLYVAL_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct polyval_tfm_ctx),
		.cra_module		= THIS_MODULE,
	},
};

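/*
 * Bind the module to the PMULL CPU feature: initialization only runs
 * when the Crypto Extensions PMULL instructions are present, and the
 * feature match also allows the module to be loaded automatically on
 * such CPUs.
 */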
static int __init polyval_ce_mod_init(void)
{
	return crypto_register_shash(&polyval_alg);
}

static void __exit polyval_ce_mod_exit(void)
{
	crypto_unregister_shash(&polyval_alg);
}

module_cpu_feature_match(PMULL, polyval_ce_mod_init)
module_exit(polyval_ce_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("POLYVAL hash function accelerated by ARMv8 Crypto Extensions");
MODULE_ALIAS_CRYPTO("polyval");
MODULE_ALIAS_CRYPTO("polyval-ce");