/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
 * Copyright (C) 2015 International Business Machines Inc.
 * Copyright 2026 Google LLC
 */
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>

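/*
 * Architecture-specific AES support for powerpc.  With CONFIG_SPE, the SPE
 * (Signal Processing Engine) assembly routines are used.  Otherwise the
 * POWER8 vector crypto routines are used when available, with a fallback to
 * the generic C implementation.
 */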
#ifdef CONFIG_SPE

EXPORT_SYMBOL_GPL(ppc_expand_key_128);
EXPORT_SYMBOL_GPL(ppc_expand_key_192);
EXPORT_SYMBOL_GPL(ppc_expand_key_256);
EXPORT_SYMBOL_GPL(ppc_generate_decrypt_key);
EXPORT_SYMBOL_GPL(ppc_encrypt_ecb);
EXPORT_SYMBOL_GPL(ppc_decrypt_ecb);
EXPORT_SYMBOL_GPL(ppc_encrypt_cbc);
EXPORT_SYMBOL_GPL(ppc_decrypt_cbc);
EXPORT_SYMBOL_GPL(ppc_crypt_ctr);
EXPORT_SYMBOL_GPL(ppc_encrypt_xts);
EXPORT_SYMBOL_GPL(ppc_decrypt_xts);

void ppc_encrypt_aes(u8 *out, const u8 *in, const u32 *key_enc, u32 rounds);
void ppc_decrypt_aes(u8 *out, const u8 *in, const u32 *key_dec, u32 rounds);

static void spe_begin(void)
{
	/* disable preemption and save the user's SPE registers if required */
	preempt_disable();
	enable_kernel_spe();
}

static void spe_end(void)
{
	disable_kernel_spe();
	/* re-enable preemption */
	preempt_enable();
}

static void aes_preparekey_arch(union aes_enckey_arch *k,
				union aes_invkey_arch *inv_k,
				const u8 *in_key, int key_len, int nrounds)
{
	if (key_len == AES_KEYSIZE_128)
		ppc_expand_key_128(k->spe_enc_key, in_key);
	else if (key_len == AES_KEYSIZE_192)
		ppc_expand_key_192(k->spe_enc_key, in_key);
	else
		ppc_expand_key_256(k->spe_enc_key, in_key);

	if (inv_k)
		ppc_generate_decrypt_key(inv_k->spe_dec_key, k->spe_enc_key,
					 key_len);
}

static void aes_encrypt_arch(const struct aes_enckey *key,
			     u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
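	/*
	 * The SPE assembly takes a reduced round count, nrounds / 2 - 1:
	 * 4, 5, or 6 for AES-128, AES-192, and AES-256 respectively.
	 */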
	spe_begin();
	ppc_encrypt_aes(out, in, key->k.spe_enc_key, key->nrounds / 2 - 1);
	spe_end();
}

static void aes_decrypt_arch(const struct aes_key *key,
			     u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	spe_begin();
	ppc_decrypt_aes(out, in, key->inv_k.spe_dec_key, key->nrounds / 2 - 1);
	spe_end();
}

#else /* CONFIG_SPE */

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vec_crypto);

EXPORT_SYMBOL_GPL(aes_p8_set_encrypt_key);
EXPORT_SYMBOL_GPL(aes_p8_set_decrypt_key);
EXPORT_SYMBOL_GPL(aes_p8_encrypt);
EXPORT_SYMBOL_GPL(aes_p8_decrypt);
EXPORT_SYMBOL_GPL(aes_p8_cbc_encrypt);
EXPORT_SYMBOL_GPL(aes_p8_ctr32_encrypt_blocks);
EXPORT_SYMBOL_GPL(aes_p8_xts_encrypt);
EXPORT_SYMBOL_GPL(aes_p8_xts_decrypt);

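/*
 * aes_p8_set_{encrypt,decrypt}_key() fill in a nonzero round count, while the
 * generic fallback path below stores 0 there, so a zero round count identifies
 * a key that was prepared in the generic format.
 */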
static inline bool is_vsx_format(const struct p8_aes_key *key)
{
	return key->nrounds != 0;
}

/*
 * Convert a round key from VSX to generic format by reflecting the 16 bytes,
 * and (if apply_inv_mix=true) applying InvMixColumns to each column.
 *
 * It would be nice if the VSX and generic key formats were compatible.  But
 * that's very difficult to do, with the assembly code having been borrowed
 * from OpenSSL and also targeted to POWER8 rather than POWER9.
 *
 * Fortunately, this conversion should only be needed in extremely rare cases,
 * possibly not at all in practice.  It's just included for full correctness.
 */
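/*
 * Concretely, a VSX-format round key holding the bytes
 *	b0 b1 b2 b3  b4 b5 b6 b7  b8 b9 b10 b11  b12 b13 b14 b15
 * becomes a generic-format round key holding
 *	b15 b14 b13 b12  b11 b10 b9 b8  b7 b6 b5 b4  b3 b2 b1 b0
 * with inv_mix_columns() additionally applied to each 32-bit word when
 * apply_inv_mix is true.
 */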
static void rndkey_from_vsx(u32 out[4], const u32 in[4], bool apply_inv_mix)
{
	u32 k0 = swab32(in[0]);
	u32 k1 = swab32(in[1]);
	u32 k2 = swab32(in[2]);
	u32 k3 = swab32(in[3]);

	if (apply_inv_mix) {
		k0 = inv_mix_columns(k0);
		k1 = inv_mix_columns(k1);
		k2 = inv_mix_columns(k2);
		k3 = inv_mix_columns(k3);
	}
	out[0] = k3;
	out[1] = k2;
	out[2] = k1;
	out[3] = k0;
}

static void aes_preparekey_arch(union aes_enckey_arch *k,
				union aes_invkey_arch *inv_k,
				const u8 *in_key, int key_len, int nrounds)
{
	const int keybits = 8 * key_len;
	int ret;

	if (static_branch_likely(&have_vec_crypto) && likely(may_use_simd())) {
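		/*
		 * enable_kernel_vsx() gives the kernel temporary ownership of
		 * the vector unit.  As in the other powerpc VSX crypto code,
		 * preemption and page faults stay disabled until
		 * disable_kernel_vsx().
		 */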
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		ret = aes_p8_set_encrypt_key(in_key, keybits, &k->p8);
		/*
		 * aes_p8_set_encrypt_key() should never fail here, since the
		 * key length was already validated.
		 */
		WARN_ON_ONCE(ret);
		if (inv_k) {
			ret = aes_p8_set_decrypt_key(in_key, keybits,
						     &inv_k->p8);
			/* ... and likewise for aes_p8_set_decrypt_key(). */
			WARN_ON_ONCE(ret);
		}
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	} else {
		aes_expandkey_generic(k->rndkeys,
				      inv_k ? inv_k->inv_rndkeys : NULL,
				      in_key, key_len);
		/* Mark the key as using the generic format. */
		k->p8.nrounds = 0;
		if (inv_k)
			inv_k->p8.nrounds = 0;
	}
}

static void aes_encrypt_arch(const struct aes_enckey *key,
			     u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	if (static_branch_likely(&have_vec_crypto) &&
	    likely(is_vsx_format(&key->k.p8) && may_use_simd())) {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		aes_p8_encrypt(in, out, &key->k.p8);
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	} else if (unlikely(is_vsx_format(&key->k.p8))) {
		/*
		 * This handles the (hopefully extremely rare) case where a key
		 * was prepared using the VSX-optimized format, but encryption
		 * is then done in a context that cannot use VSX instructions.
		 */
		u32 rndkeys[AES_MAX_KEYLENGTH_U32];

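		/*
		 * Convert all nrounds + 1 round keys to the generic format.
		 * No InvMixColumns is needed for encryption.
		 */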
		for (int i = 0; i < 4 * (key->nrounds + 1); i += 4)
			rndkey_from_vsx(&rndkeys[i],
					&key->k.p8.rndkeys[i], false);
		aes_encrypt_generic(rndkeys, key->nrounds, out, in);
	} else {
		aes_encrypt_generic(key->k.rndkeys, key->nrounds, out, in);
	}
}

static void aes_decrypt_arch(const struct aes_key *key, u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	if (static_branch_likely(&have_vec_crypto) &&
	    likely(is_vsx_format(&key->inv_k.p8) && may_use_simd())) {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		aes_p8_decrypt(in, out, &key->inv_k.p8);
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	} else if (unlikely(is_vsx_format(&key->inv_k.p8))) {
		/*
		 * This handles the (hopefully extremely rare) case where a key
		 * was prepared using the VSX-optimized format, but decryption
		 * is then done in a context that cannot use VSX instructions.
		 */
		u32 inv_rndkeys[AES_MAX_KEYLENGTH_U32];
		int i;

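		/*
		 * aes_decrypt_generic() uses the Equivalent Inverse Cipher,
		 * which expects InvMixColumns to be pre-applied to every round
		 * key except the first and last.  The VSX-format decryption
		 * key doesn't have it applied, so apply it to the inner round
		 * keys during the conversion.
		 */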
		rndkey_from_vsx(&inv_rndkeys[0],
				&key->inv_k.p8.rndkeys[0], false);
		for (i = 4; i < 4 * key->nrounds; i += 4) {
			rndkey_from_vsx(&inv_rndkeys[i],
					&key->inv_k.p8.rndkeys[i], true);
		}
		rndkey_from_vsx(&inv_rndkeys[i],
				&key->inv_k.p8.rndkeys[i], false);
		aes_decrypt_generic(inv_rndkeys, key->nrounds, out, in);
	} else {
		aes_decrypt_generic(key->inv_k.inv_rndkeys, key->nrounds,
				    out, in);
	}
}

#define aes_mod_init_arch aes_mod_init_arch
static void aes_mod_init_arch(void)
{
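	/*
	 * The aes_p8_* routines need both the POWER8 / ISA 2.07 base features
	 * and the vector crypto extension (vcipher and friends).
	 */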
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
		static_branch_enable(&have_vec_crypto);
}

#endif /* !CONFIG_SPE */