xref: /linux/drivers/crypto/bcm/spu2.c (revision bfd5bb6f90af092aa345b15cd78143956a13c2a8)
1 /*
2  * Copyright 2016 Broadcom
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License, version 2, as
6  * published by the Free Software Foundation (the "GPL").
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License version 2 (GPLv2) for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * version 2 (GPLv2) along with this source code.
15  */
16 
17 /*
18  * This file works with the SPU2 version of the SPU. SPU2 has different message
19  * formats than the previous version of the SPU. All SPU message format
20  * differences should be hidden in the spu2.c,h files.
21  */
22 
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 
26 #include "util.h"
27 #include "spu.h"
28 #include "spu2.h"
29 
30 #define SPU2_TX_STATUS_LEN  0	/* SPU2 has no STATUS in input packet */
31 
32 /*
33  * Controlled by pkt_stat_cnt field in CRYPTO_SS_SPU0_CORE_SPU2_CONTROL0
34  * register. Defaults to 2.
35  */
36 #define SPU2_RX_STATUS_LEN  2
37 
38 enum spu2_proto_sel {
39 	SPU2_PROTO_RESV = 0,
40 	SPU2_MACSEC_SECTAG8_ECB = 1,
41 	SPU2_MACSEC_SECTAG8_SCB = 2,
42 	SPU2_MACSEC_SECTAG16 = 3,
43 	SPU2_MACSEC_SECTAG16_8_XPN = 4,
44 	SPU2_IPSEC = 5,
45 	SPU2_IPSEC_ESN = 6,
46 	SPU2_TLS_CIPHER = 7,
47 	SPU2_TLS_AEAD = 8,
48 	SPU2_DTLS_CIPHER = 9,
49 	SPU2_DTLS_AEAD = 10
50 };
51 
52 char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
53 	"DES", "3DES"
54 };
55 
56 char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS",
57 	"CCM", "GCM"
58 };
59 
60 char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
61 	"Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
62 	"SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
63 	"SHA3-384", "SHA3-512"
64 };
65 
66 char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
67 	"Rabin", "CCM", "GCM", "Reserved"
68 };
69 
70 static char *spu2_ciph_type_name(enum spu2_cipher_type cipher_type)
71 {
72 	if (cipher_type >= SPU2_CIPHER_TYPE_LAST)
73 		return "Reserved";
74 	return spu2_cipher_type_names[cipher_type];
75 }
76 
77 static char *spu2_ciph_mode_name(enum spu2_cipher_mode cipher_mode)
78 {
79 	if (cipher_mode >= SPU2_CIPHER_MODE_LAST)
80 		return "Reserved";
81 	return spu2_cipher_mode_names[cipher_mode];
82 }
83 
84 static char *spu2_hash_type_name(enum spu2_hash_type hash_type)
85 {
86 	if (hash_type >= SPU2_HASH_TYPE_LAST)
87 		return "Reserved";
88 	return spu2_hash_type_names[hash_type];
89 }
90 
91 static char *spu2_hash_mode_name(enum spu2_hash_mode hash_mode)
92 {
93 	if (hash_mode >= SPU2_HASH_MODE_LAST)
94 		return "Reserved";
95 	return spu2_hash_mode_names[hash_mode];
96 }
97 
98 /*
99  * Convert from a software cipher mode value to the corresponding value
100  * for SPU2.
101  */
102 static int spu2_cipher_mode_xlate(enum spu_cipher_mode cipher_mode,
103 				  enum spu2_cipher_mode *spu2_mode)
104 {
105 	switch (cipher_mode) {
106 	case CIPHER_MODE_ECB:
107 		*spu2_mode = SPU2_CIPHER_MODE_ECB;
108 		break;
109 	case CIPHER_MODE_CBC:
110 		*spu2_mode = SPU2_CIPHER_MODE_CBC;
111 		break;
112 	case CIPHER_MODE_OFB:
113 		*spu2_mode = SPU2_CIPHER_MODE_OFB;
114 		break;
115 	case CIPHER_MODE_CFB:
116 		*spu2_mode = SPU2_CIPHER_MODE_CFB;
117 		break;
118 	case CIPHER_MODE_CTR:
119 		*spu2_mode = SPU2_CIPHER_MODE_CTR;
120 		break;
121 	case CIPHER_MODE_CCM:
122 		*spu2_mode = SPU2_CIPHER_MODE_CCM;
123 		break;
124 	case CIPHER_MODE_GCM:
125 		*spu2_mode = SPU2_CIPHER_MODE_GCM;
126 		break;
127 	case CIPHER_MODE_XTS:
128 		*spu2_mode = SPU2_CIPHER_MODE_XTS;
129 		break;
130 	default:
131 		return -EINVAL;
132 	}
133 	return 0;
134 }
135 
136 /**
137  * spu2_cipher_xlate() - Convert a cipher {alg/mode/type} triple to a SPU2
138  * cipher type and mode.
139  * @cipher_alg:  [in]  cipher algorithm value from software enumeration
140  * @cipher_mode: [in]  cipher mode value from software enumeration
141  * @cipher_type: [in]  cipher type value from software enumeration
142  * @spu2_type:   [out] cipher type value used by spu2 hardware
143  * @spu2_mode:   [out] cipher mode value used by spu2 hardware
144  *
145  * Return:  0 if successful
146  */
147 static int spu2_cipher_xlate(enum spu_cipher_alg cipher_alg,
148 			     enum spu_cipher_mode cipher_mode,
149 			     enum spu_cipher_type cipher_type,
150 			     enum spu2_cipher_type *spu2_type,
151 			     enum spu2_cipher_mode *spu2_mode)
152 {
153 	int err;
154 
155 	err = spu2_cipher_mode_xlate(cipher_mode, spu2_mode);
156 	if (err) {
157 		flow_log("Invalid cipher mode %d\n", cipher_mode);
158 		return err;
159 	}
160 
161 	switch (cipher_alg) {
162 	case CIPHER_ALG_NONE:
163 		*spu2_type = SPU2_CIPHER_TYPE_NONE;
164 		break;
165 	case CIPHER_ALG_RC4:
166 		/* SPU2 does not support RC4 */
167 		err = -EINVAL;
168 		*spu2_type = SPU2_CIPHER_TYPE_NONE;
169 		break;
170 	case CIPHER_ALG_DES:
171 		*spu2_type = SPU2_CIPHER_TYPE_DES;
172 		break;
173 	case CIPHER_ALG_3DES:
174 		*spu2_type = SPU2_CIPHER_TYPE_3DES;
175 		break;
176 	case CIPHER_ALG_AES:
177 		switch (cipher_type) {
178 		case CIPHER_TYPE_AES128:
179 			*spu2_type = SPU2_CIPHER_TYPE_AES128;
180 			break;
181 		case CIPHER_TYPE_AES192:
182 			*spu2_type = SPU2_CIPHER_TYPE_AES192;
183 			break;
184 		case CIPHER_TYPE_AES256:
185 			*spu2_type = SPU2_CIPHER_TYPE_AES256;
186 			break;
187 		default:
188 			err = -EINVAL;
189 		}
190 		break;
191 	case CIPHER_ALG_LAST:
192 	default:
193 		err = -EINVAL;
194 		break;
195 	}
196 
197 	if (err)
198 		flow_log("Invalid cipher alg %d or type %d\n",
199 			 cipher_alg, cipher_type);
200 	return err;
201 }
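
/*
 * Illustrative sketch (editor's addition, not part of the driver): translating
 * the software AES-128-CBC triple through spu2_cipher_xlate() picks out the
 * SPU2 values selected by the switch statements above.
 *
 *	enum spu2_cipher_type spu2_type;
 *	enum spu2_cipher_mode spu2_mode;
 *	int err;
 *
 *	err = spu2_cipher_xlate(CIPHER_ALG_AES, CIPHER_MODE_CBC,
 *				CIPHER_TYPE_AES128, &spu2_type, &spu2_mode);
 *
 * On success, spu2_type is SPU2_CIPHER_TYPE_AES128 and spu2_mode is
 * SPU2_CIPHER_MODE_CBC; an unsupported algorithm such as CIPHER_ALG_RC4
 * makes the function return -EINVAL.
 */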
202 
203 /*
204  * Convert from a software hash mode value to the corresponding value
205  * for SPU2. Note that HASH_MODE_NONE and HASH_MODE_XCBC have the same value.
206  */
207 static int spu2_hash_mode_xlate(enum hash_mode hash_mode,
208 				enum spu2_hash_mode *spu2_mode)
209 {
210 	switch (hash_mode) {
211 	case HASH_MODE_XCBC:
212 		*spu2_mode = SPU2_HASH_MODE_XCBC_MAC;
213 		break;
214 	case HASH_MODE_CMAC:
215 		*spu2_mode = SPU2_HASH_MODE_CMAC;
216 		break;
217 	case HASH_MODE_HMAC:
218 		*spu2_mode = SPU2_HASH_MODE_HMAC;
219 		break;
220 	case HASH_MODE_CCM:
221 		*spu2_mode = SPU2_HASH_MODE_CCM;
222 		break;
223 	case HASH_MODE_GCM:
224 		*spu2_mode = SPU2_HASH_MODE_GCM;
225 		break;
226 	default:
227 		return -EINVAL;
228 	}
229 	return 0;
230 }
231 
232 /**
233  * spu2_hash_xlate() - Convert a hash {alg/mode/type} triple to a SPU2 hash type
234  * and mode.
235  * @hash_alg:  [in] hash algorithm value from software enumeration
236  * @hash_mode: [in] hash mode value from software enumeration
237  * @hash_type: [in] hash type value from software enumeration
238  * @ciph_type: [in] cipher type value from software enumeration
239  * @spu2_type: [out] hash type value used by SPU2 hardware
240  * @spu2_mode: [out] hash mode value used by SPU2 hardware
241  *
242  * Return:  0 if successful
243  */
244 static int
245 spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
246 		enum hash_type hash_type, enum spu_cipher_type ciph_type,
247 		enum spu2_hash_type *spu2_type, enum spu2_hash_mode *spu2_mode)
248 {
249 	int err;
250 
251 	err = spu2_hash_mode_xlate(hash_mode, spu2_mode);
252 	if (err) {
253 		flow_log("Invalid hash mode %d\n", hash_mode);
254 		return err;
255 	}
256 
257 	switch (hash_alg) {
258 	case HASH_ALG_NONE:
259 		*spu2_type = SPU2_HASH_TYPE_NONE;
260 		break;
261 	case HASH_ALG_MD5:
262 		*spu2_type = SPU2_HASH_TYPE_MD5;
263 		break;
264 	case HASH_ALG_SHA1:
265 		*spu2_type = SPU2_HASH_TYPE_SHA1;
266 		break;
267 	case HASH_ALG_SHA224:
268 		*spu2_type = SPU2_HASH_TYPE_SHA224;
269 		break;
270 	case HASH_ALG_SHA256:
271 		*spu2_type = SPU2_HASH_TYPE_SHA256;
272 		break;
273 	case HASH_ALG_SHA384:
274 		*spu2_type = SPU2_HASH_TYPE_SHA384;
275 		break;
276 	case HASH_ALG_SHA512:
277 		*spu2_type = SPU2_HASH_TYPE_SHA512;
278 		break;
279 	case HASH_ALG_AES:
280 		switch (ciph_type) {
281 		case CIPHER_TYPE_AES128:
282 			*spu2_type = SPU2_HASH_TYPE_AES128;
283 			break;
284 		case CIPHER_TYPE_AES192:
285 			*spu2_type = SPU2_HASH_TYPE_AES192;
286 			break;
287 		case CIPHER_TYPE_AES256:
288 			*spu2_type = SPU2_HASH_TYPE_AES256;
289 			break;
290 		default:
291 			err = -EINVAL;
292 		}
293 		break;
294 	case HASH_ALG_SHA3_224:
295 		*spu2_type = SPU2_HASH_TYPE_SHA3_224;
296 		break;
297 	case HASH_ALG_SHA3_256:
298 		*spu2_type = SPU2_HASH_TYPE_SHA3_256;
299 		break;
300 	case HASH_ALG_SHA3_384:
301 		*spu2_type = SPU2_HASH_TYPE_SHA3_384;
302 		break;
303 	case HASH_ALG_SHA3_512:
304 		*spu2_type = SPU2_HASH_TYPE_SHA3_512;
305 		break;
306 	case HASH_ALG_LAST:
307 	default:
308 		err = -EINVAL;
309 		break;
310 	}
311 
312 	if (err)
313 		flow_log("Invalid hash alg %d or type %d\n",
314 			 hash_alg, hash_type);
315 	return err;
316 }
317 
318 /* Dump FMD ctrl0. The ctrl0 input is in host byte order */
319 static void spu2_dump_fmd_ctrl0(u64 ctrl0)
320 {
321 	enum spu2_cipher_type ciph_type;
322 	enum spu2_cipher_mode ciph_mode;
323 	enum spu2_hash_type hash_type;
324 	enum spu2_hash_mode hash_mode;
325 	char *ciph_name;
326 	char *ciph_mode_name;
327 	char *hash_name;
328 	char *hash_mode_name;
329 	u8 cfb;
330 	u8 proto;
331 
332 	packet_log(" FMD CTRL0 %#16llx\n", ctrl0);
333 	if (ctrl0 & SPU2_CIPH_ENCRYPT_EN)
334 		packet_log("  encrypt\n");
335 	else
336 		packet_log("  decrypt\n");
337 
338 	ciph_type = (ctrl0 & SPU2_CIPH_TYPE) >> SPU2_CIPH_TYPE_SHIFT;
339 	ciph_name = spu2_ciph_type_name(ciph_type);
340 	packet_log("  Cipher type: %s\n", ciph_name);
341 
342 	if (ciph_type != SPU2_CIPHER_TYPE_NONE) {
343 		ciph_mode = (ctrl0 & SPU2_CIPH_MODE) >> SPU2_CIPH_MODE_SHIFT;
344 		ciph_mode_name = spu2_ciph_mode_name(ciph_mode);
345 		packet_log("  Cipher mode: %s\n", ciph_mode_name);
346 	}
347 
348 	cfb = (ctrl0 & SPU2_CFB_MASK) >> SPU2_CFB_MASK_SHIFT;
349 	packet_log("  CFB %#x\n", cfb);
350 
351 	proto = (ctrl0 & SPU2_PROTO_SEL) >> SPU2_PROTO_SEL_SHIFT;
352 	packet_log("  protocol %#x\n", proto);
353 
354 	if (ctrl0 & SPU2_HASH_FIRST)
355 		packet_log("  hash first\n");
356 	else
357 		packet_log("  cipher first\n");
358 
359 	if (ctrl0 & SPU2_CHK_TAG)
360 		packet_log("  check tag\n");
361 
362 	hash_type = (ctrl0 & SPU2_HASH_TYPE) >> SPU2_HASH_TYPE_SHIFT;
363 	hash_name = spu2_hash_type_name(hash_type);
364 	packet_log("  Hash type: %s\n", hash_name);
365 
366 	if (hash_type != SPU2_HASH_TYPE_NONE) {
367 		hash_mode = (ctrl0 & SPU2_HASH_MODE) >> SPU2_HASH_MODE_SHIFT;
368 		hash_mode_name = spu2_hash_mode_name(hash_mode);
369 		packet_log("  Hash mode: %s\n", hash_mode_name);
370 	}
371 
372 	if (ctrl0 & SPU2_CIPH_PAD_EN) {
373 		packet_log("  Cipher pad: %#2llx\n",
374 			   (ctrl0 & SPU2_CIPH_PAD) >> SPU2_CIPH_PAD_SHIFT);
375 	}
376 }
377 
378 /* Dump FMD ctrl1. The ctrl1 input is in host byte order */
379 static void spu2_dump_fmd_ctrl1(u64 ctrl1)
380 {
381 	u8 hash_key_len;
382 	u8 ciph_key_len;
383 	u8 ret_iv_len;
384 	u8 iv_offset;
385 	u8 iv_len;
386 	u8 hash_tag_len;
387 	u8 ret_md;
388 
389 	packet_log(" FMD CTRL1 %#16llx\n", ctrl1);
390 	if (ctrl1 & SPU2_TAG_LOC)
391 		packet_log("  Tag after payload\n");
392 
393 	packet_log("  Msg includes ");
394 	if (ctrl1 & SPU2_HAS_FR_DATA)
395 		packet_log("FD ");
396 	if (ctrl1 & SPU2_HAS_AAD1)
397 		packet_log("AAD1 ");
398 	if (ctrl1 & SPU2_HAS_NAAD)
399 		packet_log("NAAD ");
400 	if (ctrl1 & SPU2_HAS_AAD2)
401 		packet_log("AAD2 ");
402 	if (ctrl1 & SPU2_HAS_ESN)
403 		packet_log("ESN ");
404 	packet_log("\n");
405 
406 	hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
407 	packet_log("  Hash key len %u\n", hash_key_len);
408 
409 	ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
410 	packet_log("  Cipher key len %u\n", ciph_key_len);
411 
412 	if (ctrl1 & SPU2_GENIV)
413 		packet_log("  Generate IV\n");
414 
415 	if (ctrl1 & SPU2_HASH_IV)
416 		packet_log("  IV included in hash\n");
417 
418 	if (ctrl1 & SPU2_RET_IV)
419 		packet_log("  Return IV in output before payload\n");
420 
421 	ret_iv_len = (ctrl1 & SPU2_RET_IV_LEN) >> SPU2_RET_IV_LEN_SHIFT;
422 	packet_log("  Length of returned IV %u bytes\n",
423 		   ret_iv_len ? ret_iv_len : 16);
424 
425 	iv_offset = (ctrl1 & SPU2_IV_OFFSET) >> SPU2_IV_OFFSET_SHIFT;
426 	packet_log("  IV offset %u\n", iv_offset);
427 
428 	iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
429 	packet_log("  Input IV len %u bytes\n", iv_len);
430 
431 	hash_tag_len = (ctrl1 & SPU2_HASH_TAG_LEN) >> SPU2_HASH_TAG_LEN_SHIFT;
432 	packet_log("  Hash tag length %u bytes\n", hash_tag_len);
433 
434 	packet_log("  Return ");
435 	ret_md = (ctrl1 & SPU2_RETURN_MD) >> SPU2_RETURN_MD_SHIFT;
436 	if (ret_md)
437 		packet_log("FMD ");
438 	if (ret_md == SPU2_RET_FMD_OMD)
439 		packet_log("OMD ");
440 	else if (ret_md == SPU2_RET_FMD_OMD_IV)
441 		packet_log("OMD IV ");
442 	if (ctrl1 & SPU2_RETURN_FD)
443 		packet_log("FD ");
444 	if (ctrl1 & SPU2_RETURN_AAD1)
445 		packet_log("AAD1 ");
446 	if (ctrl1 & SPU2_RETURN_NAAD)
447 		packet_log("NAAD ");
448 	if (ctrl1 & SPU2_RETURN_AAD2)
449 		packet_log("AAD2 ");
450 	if (ctrl1 & SPU2_RETURN_PAY)
451 		packet_log("Payload");
452 	packet_log("\n");
453 }
454 
455 /* Dump FMD ctrl2. The ctrl2 input is in host byte order */
456 static void spu2_dump_fmd_ctrl2(u64 ctrl2)
457 {
458 	packet_log(" FMD CTRL2 %#16llx\n", ctrl2);
459 
460 	packet_log("  AAD1 offset %llu length %llu bytes\n",
461 		   ctrl2 & SPU2_AAD1_OFFSET,
462 		   (ctrl2 & SPU2_AAD1_LEN) >> SPU2_AAD1_LEN_SHIFT);
463 	packet_log("  AAD2 offset %llu\n",
464 		   (ctrl2 & SPU2_AAD2_OFFSET) >> SPU2_AAD2_OFFSET_SHIFT);
465 	packet_log("  Payload offset %llu\n",
466 		   (ctrl2 & SPU2_PL_OFFSET) >> SPU2_PL_OFFSET_SHIFT);
467 }
468 
469 /* Dump FMD ctrl3. The ctrl3 input is in host byte order */
470 static void spu2_dump_fmd_ctrl3(u64 ctrl3)
471 {
472 	packet_log(" FMD CTRL3 %#16llx\n", ctrl3);
473 
474 	packet_log("  Payload length %llu bytes\n", ctrl3 & SPU2_PL_LEN);
475 	packet_log("  TLS length %llu bytes\n",
476 		   (ctrl3 & SPU2_TLS_LEN) >> SPU2_TLS_LEN_SHIFT);
477 }
478 
479 static void spu2_dump_fmd(struct SPU2_FMD *fmd)
480 {
481 	spu2_dump_fmd_ctrl0(le64_to_cpu(fmd->ctrl0));
482 	spu2_dump_fmd_ctrl1(le64_to_cpu(fmd->ctrl1));
483 	spu2_dump_fmd_ctrl2(le64_to_cpu(fmd->ctrl2));
484 	spu2_dump_fmd_ctrl3(le64_to_cpu(fmd->ctrl3));
485 }
486 
487 static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
488 			  u16 hash_iv_len, u16 ciph_iv_len)
489 {
490 	u8 *ptr = omd;
491 
492 	packet_log(" OMD:\n");
493 
494 	if (hash_key_len) {
495 		packet_log("  Hash Key Length %u bytes\n", hash_key_len);
496 		packet_dump("  KEY: ", ptr, hash_key_len);
497 		ptr += hash_key_len;
498 	}
499 
500 	if (ciph_key_len) {
501 		packet_log("  Cipher Key Length %u bytes\n", ciph_key_len);
502 		packet_dump("  KEY: ", ptr, ciph_key_len);
503 		ptr += ciph_key_len;
504 	}
505 
506 	if (hash_iv_len) {
507 		packet_log("  Hash IV Length %u bytes\n", hash_iv_len);
508 		packet_dump("  hash IV: ", ptr, hash_iv_len);
509 		ptr += hash_iv_len;
510 	}
511 
512 	if (ciph_iv_len) {
513 		packet_log("  Cipher IV Length %u bytes\n", ciph_iv_len);
514 		packet_dump("  cipher IV: ", ptr, ciph_iv_len);
515 	}
516 }
517 
518 /* Dump a SPU2 header for debug */
519 void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
520 {
521 	struct SPU2_FMD *fmd = (struct SPU2_FMD *)buf;
522 	u8 *omd;
523 	u64 ctrl1;
524 	u16 hash_key_len;
525 	u16 ciph_key_len;
526 	u16 hash_iv_len;
527 	u16 ciph_iv_len;
528 	u16 omd_len;
529 
530 	packet_log("\n");
531 	packet_log("SPU2 message header %p len: %u\n", buf, buf_len);
532 
533 	spu2_dump_fmd(fmd);
534 	omd = (u8 *)(fmd + 1);
535 
536 	ctrl1 = le64_to_cpu(fmd->ctrl1);
537 	hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
538 	ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
539 	hash_iv_len = 0;
540 	ciph_iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
541 	spu2_dump_omd(omd, hash_key_len, ciph_key_len, hash_iv_len,
542 		      ciph_iv_len);
543 
544 	/* Double check sanity */
545 	omd_len = hash_key_len + ciph_key_len + hash_iv_len + ciph_iv_len;
546 	if (FMD_SIZE + omd_len != buf_len) {
547 		packet_log
548 		    (" Packet parsed incorrectly. buf_len %u, sum of MD %zu\n",
549 		     buf_len, FMD_SIZE + omd_len);
550 	}
551 	packet_log("\n");
552 }
553 
554 /**
555  * spu2_fmd_init() - At setkey time, initialize the fixed meta data for
556  * subsequent ablkcipher requests for this context.
557  * @spu2_type:         Cipher algorithm
558  * @spu2_mode:         Cipher mode
559  * @cipher_key_len:    Length of cipher key, in bytes
560  * @cipher_iv_len:     Length of cipher initialization vector, in bytes
561  *
562  * Return:  0 (success)
563  */
564 static int spu2_fmd_init(struct SPU2_FMD *fmd,
565 			 enum spu2_cipher_type spu2_type,
566 			 enum spu2_cipher_mode spu2_mode,
567 			 u32 cipher_key_len, u32 cipher_iv_len)
568 {
569 	u64 ctrl0;
570 	u64 ctrl1;
571 	u64 ctrl2;
572 	u64 ctrl3;
573 	u32 aad1_offset;
574 	u32 aad2_offset;
575 	u16 aad1_len = 0;
576 	u64 payload_offset;
577 
578 	ctrl0 = (spu2_type << SPU2_CIPH_TYPE_SHIFT) |
579 	    (spu2_mode << SPU2_CIPH_MODE_SHIFT);
580 
581 	ctrl1 = (cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) |
582 	    ((u64)cipher_iv_len << SPU2_IV_LEN_SHIFT) |
583 	    ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT) | SPU2_RETURN_PAY;
584 
585 	/*
586 	 * AAD1 offset is from start of FD. FD length is always 0 for this
587 	 * driver. So AAD1_offset is always 0.
588 	 */
589 	aad1_offset = 0;
590 	aad2_offset = aad1_offset;
591 	payload_offset = 0;
592 	ctrl2 = aad1_offset |
593 	    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
594 	    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
595 	    (payload_offset << SPU2_PL_OFFSET_SHIFT);
596 
597 	ctrl3 = 0;
598 
599 	fmd->ctrl0 = cpu_to_le64(ctrl0);
600 	fmd->ctrl1 = cpu_to_le64(ctrl1);
601 	fmd->ctrl2 = cpu_to_le64(ctrl2);
602 	fmd->ctrl3 = cpu_to_le64(ctrl3);
603 
604 	return 0;
605 }
606 
607 /**
608  * spu2_fmd_ctrl0_write() - Write ctrl0 field in fixed metadata (FMD) field of
609  * SPU request packet.
610  * @fmd:            Start of FMD field to be written
611  * @is_inbound:     true if decrypting. false if encrypting.
612  * @auth_first:     true if alg authenticates before encrypting
613  * @protocol:       protocol selector
614  * @cipher_type:    cipher algorithm
615  * @cipher_mode:    cipher mode
616  * @auth_type:      authentication type
617  * @auth_mode:      authentication mode
618  */
619 static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
620 				 bool is_inbound, bool auth_first,
621 				 enum spu2_proto_sel protocol,
622 				 enum spu2_cipher_type cipher_type,
623 				 enum spu2_cipher_mode cipher_mode,
624 				 enum spu2_hash_type auth_type,
625 				 enum spu2_hash_mode auth_mode)
626 {
627 	u64 ctrl0 = 0;
628 
629 	if ((cipher_type != SPU2_CIPHER_TYPE_NONE) && !is_inbound)
630 		ctrl0 |= SPU2_CIPH_ENCRYPT_EN;
631 
632 	ctrl0 |= ((u64)cipher_type << SPU2_CIPH_TYPE_SHIFT) |
633 	    ((u64)cipher_mode << SPU2_CIPH_MODE_SHIFT);
634 
635 	if (protocol)
636 		ctrl0 |= (u64)protocol << SPU2_PROTO_SEL_SHIFT;
637 
638 	if (auth_first)
639 		ctrl0 |= SPU2_HASH_FIRST;
640 
641 	if (is_inbound && (auth_type != SPU2_HASH_TYPE_NONE))
642 		ctrl0 |= SPU2_CHK_TAG;
643 
644 	ctrl0 |= (((u64)auth_type << SPU2_HASH_TYPE_SHIFT) |
645 		  ((u64)auth_mode << SPU2_HASH_MODE_SHIFT));
646 
647 	fmd->ctrl0 = cpu_to_le64(ctrl0);
648 }
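
/*
 * Illustrative sketch (editor's addition, not part of the driver): for an
 * inbound (decrypt) AES-256-GCM request, where the driver sets auth_first
 * for inbound GCM (see spu2_create_request() below), the ctrl0 writer would
 * be called roughly as:
 *
 *	spu2_fmd_ctrl0_write(fmd, true, true, SPU2_PROTO_RESV,
 *			     SPU2_CIPHER_TYPE_AES256, SPU2_CIPHER_MODE_GCM,
 *			     SPU2_HASH_TYPE_AES256, SPU2_HASH_MODE_GCM);
 *
 * Because is_inbound is true and the auth type is not NONE, SPU2_CHK_TAG is
 * set and SPU2_CIPH_ENCRYPT_EN is left clear.
 */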
649 
650 /**
651  * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
652  * SPU request packet.
653  * @fmd:            Start of FMD field to be written
654  * @assoc_size:     Length of additional associated data, in bytes
655  * @auth_key_len:   Length of authentication key, in bytes
656  * @cipher_key_len: Length of cipher key, in bytes
657  * @gen_iv:         If true, hw generates IV and returns in response
658  * @hash_iv:        IV participates in hash. Used for IPSEC and TLS.
659  * @return_iv:      Return IV in output packet before payload
660  * @ret_iv_len:     Length of IV returned from SPU, in bytes
661  * @ret_iv_offset:  Offset into full IV of start of returned IV
662  * @cipher_iv_len:  Length of input cipher IV, in bytes
663  * @digest_size:    Length of digest (aka, hash tag or ICV), in bytes
664  * @return_payload: Return payload in SPU response
665  * @return_md:      Return metadata in SPU response
666  *
667  * Packet can have AAD2 w/o AAD1. For algorithms currently supported,
668  * associated data goes in AAD2.
669  */
670 static void spu2_fmd_ctrl1_write(struct SPU2_FMD *fmd, bool is_inbound,
671 				 u64 assoc_size,
672 				 u64 auth_key_len, u64 cipher_key_len,
673 				 bool gen_iv, bool hash_iv, bool return_iv,
674 				 u64 ret_iv_len, u64 ret_iv_offset,
675 				 u64 cipher_iv_len, u64 digest_size,
676 				 bool return_payload, bool return_md)
677 {
678 	u64 ctrl1 = 0;
679 
680 	if (is_inbound && digest_size)
681 		ctrl1 |= SPU2_TAG_LOC;
682 
683 	if (assoc_size) {
684 		ctrl1 |= SPU2_HAS_AAD2;
685 		ctrl1 |= SPU2_RETURN_AAD2;  /* need aad2 for gcm aes esp */
686 	}
687 
688 	if (auth_key_len)
689 		ctrl1 |= ((auth_key_len << SPU2_HASH_KEY_LEN_SHIFT) &
690 			  SPU2_HASH_KEY_LEN);
691 
692 	if (cipher_key_len)
693 		ctrl1 |= ((cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) &
694 			  SPU2_CIPH_KEY_LEN);
695 
696 	if (gen_iv)
697 		ctrl1 |= SPU2_GENIV;
698 
699 	if (hash_iv)
700 		ctrl1 |= SPU2_HASH_IV;
701 
702 	if (return_iv) {
703 		ctrl1 |= SPU2_RET_IV;
704 		ctrl1 |= ret_iv_len << SPU2_RET_IV_LEN_SHIFT;
705 		ctrl1 |= ret_iv_offset << SPU2_IV_OFFSET_SHIFT;
706 	}
707 
708 	ctrl1 |= ((cipher_iv_len << SPU2_IV_LEN_SHIFT) & SPU2_IV_LEN);
709 
710 	if (digest_size)
711 		ctrl1 |= ((digest_size << SPU2_HASH_TAG_LEN_SHIFT) &
712 			  SPU2_HASH_TAG_LEN);
713 
714 	/* Let's ask for the output pkt to include FMD, but don't need to
715 	 * get keys and IVs back in OMD.
716 	 */
717 	if (return_md)
718 		ctrl1 |= ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT);
719 	else
720 		ctrl1 |= ((u64)SPU2_RET_NO_MD << SPU2_RETURN_MD_SHIFT);
721 
722 	/* Crypto API does not get assoc data back. So no need for AAD2. */
723 
724 	if (return_payload)
725 		ctrl1 |= SPU2_RETURN_PAY;
726 
727 	fmd->ctrl1 = cpu_to_le64(ctrl1);
728 }
729 
730 /**
731  * spu2_fmd_ctrl2_write() - Set the ctrl2 field in the fixed metadata field of
732  * SPU2 header.
733  * @fmd:            Start of FMD field to be written
734  * @cipher_offset:  Number of bytes from Start of Packet (end of FD field) where
735  *                  data to be encrypted or decrypted begins
736  * @auth_key_len:   Length of authentication key, in bytes
737  * @auth_iv_len:    Length of authentication initialization vector, in bytes
738  * @cipher_key_len: Length of cipher key, in bytes
739  * @cipher_iv_len:  Length of cipher IV, in bytes
740  */
741 static void spu2_fmd_ctrl2_write(struct SPU2_FMD *fmd, u64 cipher_offset,
742 				 u64 auth_key_len, u64 auth_iv_len,
743 				 u64 cipher_key_len, u64 cipher_iv_len)
744 {
745 	u64 ctrl2;
746 	u64 aad1_offset;
747 	u64 aad2_offset;
748 	u16 aad1_len = 0;
749 	u64 payload_offset;
750 
751 	/* AAD1 offset is from start of FD. FD length always 0. */
752 	aad1_offset = 0;
753 
754 	aad2_offset = aad1_offset;
755 	payload_offset = cipher_offset;
756 	ctrl2 = aad1_offset |
757 	    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
758 	    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
759 	    (payload_offset << SPU2_PL_OFFSET_SHIFT);
760 
761 	fmd->ctrl2 = cpu_to_le64(ctrl2);
762 }
763 
764 /**
765  * spu2_fmd_ctrl3_write() - Set the ctrl3 field in FMD
766  * @fmd:          Fixed meta data. First field in SPU2 msg header.
767  * @payload_len:  Length of payload, in bytes
768  */
769 static void spu2_fmd_ctrl3_write(struct SPU2_FMD *fmd, u64 payload_len)
770 {
771 	u64 ctrl3;
772 
773 	ctrl3 = payload_len & SPU2_PL_LEN;
774 
775 	fmd->ctrl3 = cpu_to_le64(ctrl3);
776 }
777 
778 /**
779  * spu2_ctx_max_payload() - Determine the maximum length of the payload for a
780  * SPU message for a given cipher and hash alg context.
781  * @cipher_alg:		The cipher algorithm
782  * @cipher_mode:	The cipher mode
783  * @blocksize:		The size of a block of data for this algo
784  *
785  * For SPU2, the hardware generally ignores the PayloadLen field in ctrl3 of
786  * FMD and just keeps computing until it receives a DMA descriptor with the EOF
787  * flag set. So we consider the max payload to be infinite. AES CCM is an
788  * exception.
789  *
790  * Return: Max payload length in bytes
791  */
792 u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
793 			 enum spu_cipher_mode cipher_mode,
794 			 unsigned int blocksize)
795 {
796 	if ((cipher_alg == CIPHER_ALG_AES) &&
797 	    (cipher_mode == CIPHER_MODE_CCM)) {
798 		u32 excess = SPU2_MAX_PAYLOAD % blocksize;
799 
800 		return SPU2_MAX_PAYLOAD - excess;
801 	} else {
802 		return SPU_MAX_PAYLOAD_INF;
803 	}
804 }
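
/*
 * Worked example (editor's addition): the CCM case simply rounds the limit
 * down to a whole number of cipher blocks. If, hypothetically, SPU2_MAX_PAYLOAD
 * were 65535 and blocksize were 16, then excess = 65535 % 16 = 15 and the
 * function would return 65520. All other cipher modes report
 * SPU_MAX_PAYLOAD_INF, i.e. no per-request length limit is enforced.
 */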
805 
806 /**
807  * spu2_payload_length() - Given a SPU2 message header, extract the payload
808  * length.
809  * @spu_hdr:  Start of SPU message header (FMD)
810  *
811  * Return: payload length, in bytes
812  */
813 u32 spu2_payload_length(u8 *spu_hdr)
814 {
815 	struct SPU2_FMD *fmd = (struct SPU2_FMD *)spu_hdr;
816 	u32 pl_len;
817 	u64 ctrl3;
818 
819 	ctrl3 = le64_to_cpu(fmd->ctrl3);
820 	pl_len = ctrl3 & SPU2_PL_LEN;
821 
822 	return pl_len;
823 }
824 
825 /**
826  * spu2_response_hdr_len() - Determine the expected length of a SPU response
827  * header.
828  * @auth_key_len:  Length of authentication key, in bytes
829  * @enc_key_len:   Length of encryption key, in bytes
830  *
831  * For SPU2, includes just FMD. OMD is never requested.
832  *
833  * Return: Length of FMD, in bytes
834  */
835 u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
836 {
837 	return FMD_SIZE;
838 }
839 
840 /**
841  * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
842  * data to a full block size.
843  * @hash_alg:        hash algorithm
844  * @hash_mode:       hash mode
845  * @chunksize:       length of data, in bytes
846  * @hash_block_size: size of a hash block, in bytes
847  *
848  * SPU2 hardware does all hash padding
849  *
850  * Return:  length of hash pad in bytes
851  */
852 u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
853 		      u32 chunksize, u16 hash_block_size)
854 {
855 	return 0;
856 }
857 
858 /**
859  * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
860  * the AAD field or the data.
861  *
862  * Return:  0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
863  */
864 u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
865 			 unsigned int data_size)
866 {
867 	return 0;
868 }
869 
870 /**
871  * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
872  * associated data in a SPU2 output packet.
873  * @cipher_mode:   cipher mode
874  * @assoc_len:     length of additional associated data, in bytes
875  * @iv_len:        length of initialization vector, in bytes
876  * @is_encrypt:    true if encrypting. false if decrypt.
877  *
878  * Return: Length of buffer to catch associated data in response
879  */
880 u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
881 			unsigned int assoc_len, unsigned int iv_len,
882 			bool is_encrypt)
883 {
884 	u32 resp_len = assoc_len;
885 
886 	if (is_encrypt)
887 		/* gcm aes esp has to write 8-byte IV in response */
888 		resp_len += iv_len;
889 	return resp_len;
890 }
891 
892 /**
893  * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
894  * in a SPU request after the AAD and before the payload.
895  * @cipher_mode:  cipher mode
896  * @iv_len:       initialization vector length in bytes
897  *
898  * For SPU2, AEAD IV is included in OMD and does not need to be repeated
899  * prior to the payload.
900  *
901  * Return: Length of AEAD IV in bytes
902  */
903 u8 spu2_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
904 {
905 	return 0;
906 }
907 
908 /**
909  * spu2_hash_type() - Determine the type of hash operation.
910  * @src_sent:  The number of bytes in the current request that have already
911  *             been sent to the SPU to be hashed.
912  *
913  * SPU2 always does a FULL hash operation
914  */
915 enum hash_type spu2_hash_type(u32 src_sent)
916 {
917 	return HASH_TYPE_FULL;
918 }
919 
920 /**
921  * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
922  * return.
923  * @alg_digest_size: Number of bytes in the final digest for the given algo
924  * @alg:             The hash algorithm
925  * @htype:           Type of hash operation (init, update, full, etc)
926  *
927  */
928 u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
929 		     enum hash_type htype)
930 {
931 	return alg_digest_size;
932 }
933 
934 /**
935  * spu2_create_request() - Build a SPU2 request message header, including FMD and
936  * OMD.
937  * @spu_hdr: Start of buffer where SPU request header is to be written
938  * @req_opts: SPU request message options
939  * @cipher_parms: Parameters related to cipher algorithm
940  * @hash_parms:   Parameters related to hash algorithm
941  * @aead_parms:   Parameters related to AEAD operation
942  * @data_size:    Length of data to be encrypted or authenticated. If AEAD, does
943  *		  not include length of AAD.
944  *
945  * Construct the message starting at spu_hdr. Caller should allocate this buffer
946  * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
947  *
948  * Return: the length of the SPU header in bytes. 0 if an error occurs.
949  */
950 u32 spu2_create_request(u8 *spu_hdr,
951 			struct spu_request_opts *req_opts,
952 			struct spu_cipher_parms *cipher_parms,
953 			struct spu_hash_parms *hash_parms,
954 			struct spu_aead_parms *aead_parms,
955 			unsigned int data_size)
956 {
957 	struct SPU2_FMD *fmd;
958 	u8 *ptr;
959 	unsigned int buf_len;
960 	int err;
961 	enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
962 	enum spu2_cipher_mode spu2_ciph_mode;
963 	enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE;
964 	enum spu2_hash_mode spu2_auth_mode;
965 	bool return_md = true;
966 	enum spu2_proto_sel proto = SPU2_PROTO_RESV;
967 
968 	/* size of the payload */
969 	unsigned int payload_len =
970 	    hash_parms->prebuf_len + data_size + hash_parms->pad_len -
971 	    ((req_opts->is_aead && req_opts->is_inbound) ?
972 	     hash_parms->digestsize : 0);
973 
974 	/* offset of prebuf or data from start of AAD2 */
975 	unsigned int cipher_offset = aead_parms->assoc_size +
976 			aead_parms->aad_pad_len + aead_parms->iv_len;
977 
978 #ifdef DEBUG
979 	/* total size of the data following OMD (without STAT word padding) */
980 	unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
981 						 aead_parms->iv_len,
982 						 hash_parms->prebuf_len,
983 						 data_size,
984 						 aead_parms->aad_pad_len,
985 						 aead_parms->data_pad_len,
986 						 hash_parms->pad_len);
987 #endif
988 	unsigned int assoc_size = aead_parms->assoc_size;
989 
990 	if (req_opts->is_aead &&
991 	    (cipher_parms->alg == CIPHER_ALG_AES) &&
992 	    (cipher_parms->mode == CIPHER_MODE_GCM))
993 		/*
994 		 * On SPU 2, aes gcm cipher first on encrypt, auth first on
995 		 * decrypt
996 		 */
997 		req_opts->auth_first = req_opts->is_inbound;
998 
999 	/* and do opposite for ccm (auth 1st on encrypt) */
1000 	if (req_opts->is_aead &&
1001 	    (cipher_parms->alg == CIPHER_ALG_AES) &&
1002 	    (cipher_parms->mode == CIPHER_MODE_CCM))
1003 		req_opts->auth_first = !req_opts->is_inbound;
1004 
1005 	flow_log("%s()\n", __func__);
1006 	flow_log("  in:%u authFirst:%u\n",
1007 		 req_opts->is_inbound, req_opts->auth_first);
1008 	flow_log("  cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
1009 		 cipher_parms->mode, cipher_parms->type);
1010 	flow_log("  is_esp: %s\n", req_opts->is_esp ? "yes" : "no");
1011 	flow_log("    key: %d\n", cipher_parms->key_len);
1012 	flow_dump("    key: ", cipher_parms->key_buf, cipher_parms->key_len);
1013 	flow_log("    iv: %d\n", cipher_parms->iv_len);
1014 	flow_dump("    iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
1015 	flow_log("  auth alg:%u mode:%u type %u\n",
1016 		 hash_parms->alg, hash_parms->mode, hash_parms->type);
1017 	flow_log("  digestsize: %u\n", hash_parms->digestsize);
1018 	flow_log("  authkey: %d\n", hash_parms->key_len);
1019 	flow_dump("  authkey: ", hash_parms->key_buf, hash_parms->key_len);
1020 	flow_log("  assoc_size:%u\n", assoc_size);
1021 	flow_log("  prebuf_len:%u\n", hash_parms->prebuf_len);
1022 	flow_log("  data_size:%u\n", data_size);
1023 	flow_log("  hash_pad_len:%u\n", hash_parms->pad_len);
1024 	flow_log("  real_db_size:%u\n", real_db_size);
1025 	flow_log("  cipher_offset:%u payload_len:%u\n",
1026 		 cipher_offset, payload_len);
1027 	flow_log("  aead_iv: %u\n", aead_parms->iv_len);
1028 
1029 	/* Convert to spu2 values for cipher alg, hash alg */
1030 	err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
1031 				cipher_parms->type,
1032 				&spu2_ciph_type, &spu2_ciph_mode);
1033 
1034 	/* If we are doing GCM hashing only - either via rfc4543 transform
1035 	 * or because we happen to do GCM with AAD only and no payload - we
1036 	 * need to configure hardware to use hash key rather than cipher key
1037 	 * and put data into payload.  This is because unlike SPU-M, running
1038 	 * GCM cipher with 0 size payload is not permitted.
1039 	 */
1040 	if ((req_opts->is_rfc4543) ||
1041 	    ((spu2_ciph_mode == SPU2_CIPHER_MODE_GCM) &&
1042 	    (payload_len == 0))) {
1043 		/* Use hashing (only) and set up hash key */
1044 		spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
1045 		hash_parms->key_len = cipher_parms->key_len;
1046 		memcpy(hash_parms->key_buf, cipher_parms->key_buf,
1047 		       cipher_parms->key_len);
1048 		cipher_parms->key_len = 0;
1049 
1050 		if (req_opts->is_rfc4543)
1051 			payload_len += assoc_size;
1052 		else
1053 			payload_len = assoc_size;
1054 		cipher_offset = 0;
1055 		assoc_size = 0;
1056 	}
1057 
1058 	if (err)
1059 		return 0;
1060 
1061 	flow_log("spu2 cipher type %s, cipher mode %s\n",
1062 		 spu2_ciph_type_name(spu2_ciph_type),
1063 		 spu2_ciph_mode_name(spu2_ciph_mode));
1064 
1065 	err = spu2_hash_xlate(hash_parms->alg, hash_parms->mode,
1066 			      hash_parms->type,
1067 			      cipher_parms->type,
1068 			      &spu2_auth_type, &spu2_auth_mode);
1069 	if (err)
1070 		return 0;
1071 
1072 	flow_log("spu2 hash type %s, hash mode %s\n",
1073 		 spu2_hash_type_name(spu2_auth_type),
1074 		 spu2_hash_mode_name(spu2_auth_mode));
1075 
1076 	fmd = (struct SPU2_FMD *)spu_hdr;
1077 
1078 	spu2_fmd_ctrl0_write(fmd, req_opts->is_inbound, req_opts->auth_first,
1079 			     proto, spu2_ciph_type, spu2_ciph_mode,
1080 			     spu2_auth_type, spu2_auth_mode);
1081 
1082 	spu2_fmd_ctrl1_write(fmd, req_opts->is_inbound, assoc_size,
1083 			     hash_parms->key_len, cipher_parms->key_len,
1084 			     false, false,
1085 			     aead_parms->return_iv, aead_parms->ret_iv_len,
1086 			     aead_parms->ret_iv_off,
1087 			     cipher_parms->iv_len, hash_parms->digestsize,
1088 			     !req_opts->bd_suppress, return_md);
1089 
1090 	spu2_fmd_ctrl2_write(fmd, cipher_offset, hash_parms->key_len, 0,
1091 			     cipher_parms->key_len, cipher_parms->iv_len);
1092 
1093 	spu2_fmd_ctrl3_write(fmd, payload_len);
1094 
1095 	ptr = (u8 *)(fmd + 1);
1096 	buf_len = sizeof(struct SPU2_FMD);
1097 
1098 	/* Write OMD */
1099 	if (hash_parms->key_len) {
1100 		memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
1101 		ptr += hash_parms->key_len;
1102 		buf_len += hash_parms->key_len;
1103 	}
1104 	if (cipher_parms->key_len) {
1105 		memcpy(ptr, cipher_parms->key_buf, cipher_parms->key_len);
1106 		ptr += cipher_parms->key_len;
1107 		buf_len += cipher_parms->key_len;
1108 	}
1109 	if (cipher_parms->iv_len) {
1110 		memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);
1111 		ptr += cipher_parms->iv_len;
1112 		buf_len += cipher_parms->iv_len;
1113 	}
1114 
1115 	packet_dump("  SPU request header: ", spu_hdr, buf_len);
1116 
1117 	return buf_len;
1118 }
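
/*
 * Illustrative sketch (editor's addition, not part of the driver): a caller
 * fills in the parameter structs and lets spu2_create_request() emit the
 * FMD + OMD header into a DMA-able buffer of at least SPU_HEADER_ALLOC_LEN
 * bytes. Only fields that this function itself reads are shown; spu_hdr, key,
 * iv and data_size are assumed to come from the caller's request context, and
 * the values are illustrative only.
 *
 *	struct spu_request_opts req_opts = { .is_inbound = false };
 *	struct spu_cipher_parms cipher_parms = {
 *		.alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC,
 *		.type = CIPHER_TYPE_AES128,
 *		.key_buf = key, .key_len = 16,
 *		.iv_buf = iv, .iv_len = 16,
 *	};
 *	struct spu_hash_parms hash_parms = { .alg = HASH_ALG_NONE };
 *	struct spu_aead_parms aead_parms = { };
 *	u32 hdr_len;
 *
 *	hdr_len = spu2_create_request(spu_hdr, &req_opts, &cipher_parms,
 *				      &hash_parms, &aead_parms, data_size);
 *	if (hdr_len == 0)
 *		return -EINVAL;
 *
 * A return of 0 means the cipher or hash parameters could not be translated
 * to SPU2 values.
 */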
1119 
1120 /**
1121  * spu2_cipher_req_init() - Build an ablkcipher SPU2 request message header,
1122  * including FMD and OMD.
1123  * @spu_hdr:       Location of start of SPU request (FMD field)
1124  * @cipher_parms:  Parameters describing cipher request
1125  *
1126  * Called at setkey time to initialize a msg header that can be reused for all
1127  * subsequent ablkcipher requests. Construct the message starting at spu_hdr.
1128  * Caller should allocate this buffer in DMA-able memory at least
1129  * SPU_HEADER_ALLOC_LEN bytes long.
1130  *
1131  * Return: the total length of the SPU header (FMD and OMD) in bytes. 0 if an
1132  * error occurs.
1133  */
1134 u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
1135 {
1136 	struct SPU2_FMD *fmd;
1137 	u8 *omd;
1138 	enum spu2_cipher_type spu2_type = SPU2_CIPHER_TYPE_NONE;
1139 	enum spu2_cipher_mode spu2_mode;
1140 	int err;
1141 
1142 	flow_log("%s()\n", __func__);
1143 	flow_log("  cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
1144 		 cipher_parms->mode, cipher_parms->type);
1145 	flow_log("  cipher_iv_len: %u\n", cipher_parms->iv_len);
1146 	flow_log("    key: %d\n", cipher_parms->key_len);
1147 	flow_dump("    key: ", cipher_parms->key_buf, cipher_parms->key_len);
1148 
1149 	/* Convert to spu2 values */
1150 	err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
1151 				cipher_parms->type, &spu2_type, &spu2_mode);
1152 	if (err)
1153 		return 0;
1154 
1155 	flow_log("spu2 cipher type %s, cipher mode %s\n",
1156 		 spu2_ciph_type_name(spu2_type),
1157 		 spu2_ciph_mode_name(spu2_mode));
1158 
1159 	/* Construct the FMD header */
1160 	fmd = (struct SPU2_FMD *)spu_hdr;
1161 	err = spu2_fmd_init(fmd, spu2_type, spu2_mode, cipher_parms->key_len,
1162 			    cipher_parms->iv_len);
1163 	if (err)
1164 		return 0;
1165 
1166 	/* Write cipher key to OMD */
1167 	omd = (u8 *)(fmd + 1);
1168 	if (cipher_parms->key_buf && cipher_parms->key_len)
1169 		memcpy(omd, cipher_parms->key_buf, cipher_parms->key_len);
1170 
1171 	packet_dump("  SPU request header: ", spu_hdr,
1172 		    FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len);
1173 
1174 	return FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len;
1175 }
1176 
1177 /**
1178  * spu2_cipher_req_finish() - Finish building a SPU request message header for a
1179  * block cipher request.
1180  * @spu_hdr:         Start of the request message header (MH field)
1181  * @spu_req_hdr_len: Length in bytes of the SPU request header
1182  * @is_inbound:      0 encrypt, 1 decrypt
1183  * @cipher_parms:    Parameters describing cipher operation to be performed
1184  * @update_key:      If true, rewrite the cipher key in SCTX
1185  * @data_size:       Length of the data in the BD field
1186  *
1187  * Assumes much of the header was already filled in at setkey() time in
1188  * spu2_cipher_req_init().
1189  * spu2_cipher_req_init() fills in the encryption key. For RC4, when submitting a
1190  * request for a non-first chunk, we use the 260-byte SUPDT field from the
1191  * previous response as the key. update_key is true for this case. Unused in all
1192  * other cases.
1193  */
1194 void spu2_cipher_req_finish(u8 *spu_hdr,
1195 			    u16 spu_req_hdr_len,
1196 			    unsigned int is_inbound,
1197 			    struct spu_cipher_parms *cipher_parms,
1198 			    bool update_key,
1199 			    unsigned int data_size)
1200 {
1201 	struct SPU2_FMD *fmd;
1202 	u8 *omd;		/* start of optional metadata */
1203 	u64 ctrl0;
1204 	u64 ctrl3;
1205 
1206 	flow_log("%s()\n", __func__);
1207 	flow_log(" in: %u\n", is_inbound);
1208 	flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
1209 		 cipher_parms->type);
1210 	if (update_key) {
1211 		flow_log(" cipher key len: %u\n", cipher_parms->key_len);
1212 		flow_dump("  key: ", cipher_parms->key_buf,
1213 			  cipher_parms->key_len);
1214 	}
1215 	flow_log(" iv len: %d\n", cipher_parms->iv_len);
1216 	flow_dump("    iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
1217 	flow_log(" data_size: %u\n", data_size);
1218 
1219 	fmd = (struct SPU2_FMD *)spu_hdr;
1220 	omd = (u8 *)(fmd + 1);
1221 
1222 	/*
1223 	 * FMD ctrl0 was initialized at setkey time. update it to indicate
1224 	 * whether we are encrypting or decrypting.
1225 	 */
1226 	ctrl0 = le64_to_cpu(fmd->ctrl0);
1227 	if (is_inbound)
1228 		ctrl0 &= ~SPU2_CIPH_ENCRYPT_EN;	/* decrypt */
1229 	else
1230 		ctrl0 |= SPU2_CIPH_ENCRYPT_EN;	/* encrypt */
1231 	fmd->ctrl0 = cpu_to_le64(ctrl0);
1232 
1233 	if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len) {
1234 		/* cipher iv provided so put it in here */
1235 		memcpy(omd + cipher_parms->key_len, cipher_parms->iv_buf,
1236 		       cipher_parms->iv_len);
1237 	}
1238 
1239 	ctrl3 = le64_to_cpu(fmd->ctrl3);
1240 	data_size &= SPU2_PL_LEN;
1241 	ctrl3 |= data_size;
1242 	fmd->ctrl3 = cpu_to_le64(ctrl3);
1243 
1244 	packet_dump("  SPU request header: ", spu_hdr, spu_req_hdr_len);
1245 }
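
/*
 * Illustrative sketch (editor's addition, not part of the driver): the two
 * routines above are meant to be used as a pair. The reusable header is built
 * once at setkey() time, and each request then only patches in the direction,
 * IV and payload length. spu_hdr, cipher_parms, is_inbound and data_size are
 * assumed to come from the caller's context.
 *
 *	u16 hdr_len;
 *
 *	hdr_len = spu2_cipher_req_init(spu_hdr, &cipher_parms);
 *	if (hdr_len == 0)
 *		return -EINVAL;
 *
 *	...
 *
 *	spu2_cipher_req_finish(spu_hdr, hdr_len, is_inbound, &cipher_parms,
 *			       false, data_size);
 *
 * update_key is passed as false here since SPU2 has no RC4 rekeying case
 * (see spu2_cipher_xlate() above).
 */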
1246 
1247 /**
1248  * spu2_request_pad() - Create pad bytes at the end of the data.
1249  * @pad_start:      Start of buffer where pad bytes are to be written
1250  * @gcm_padding:    Length of GCM padding, in bytes
1251  * @hash_pad_len:   Number of bytes of padding to extend data to a full block
1252  * @auth_alg:       Authentication algorithm
1253  * @auth_mode:      Authentication mode
1254  * @total_sent:     Length inserted at end of hash pad
1255  * @status_padding: Number of bytes of padding to align STATUS word
1256  *
1257  * There may be three forms of pad:
1258  *  1. GCM pad - for GCM mode ciphers, pad to 16-byte alignment
1259  *  2. hash pad - pad to a block length, with 0x80 data terminator and
1260  *                size at the end
1261  *  3. STAT pad - to ensure the STAT field is 4-byte aligned
1262  */
1263 void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
1264 		      enum hash_alg auth_alg, enum hash_mode auth_mode,
1265 		      unsigned int total_sent, u32 status_padding)
1266 {
1267 	u8 *ptr = pad_start;
1268 
1269 	/* fix data alignment for GCM */
1270 	if (gcm_padding > 0) {
1271 		flow_log("  GCM: padding to 16 byte alignment: %u bytes\n",
1272 			 gcm_padding);
1273 		memset(ptr, 0, gcm_padding);
1274 		ptr += gcm_padding;
1275 	}
1276 
1277 	if (hash_pad_len > 0) {
1278 		/* clear the padding section */
1279 		memset(ptr, 0, hash_pad_len);
1280 
1281 		/* terminate the data */
1282 		*ptr = 0x80;
1283 		ptr += (hash_pad_len - sizeof(u64));
1284 
1285 		/* add the size at the end as required per alg */
1286 		if (auth_alg == HASH_ALG_MD5)
1287 			*(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
1288 		else		/* SHA1, SHA2-224, SHA2-256 */
1289 			*(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
1290 		ptr += sizeof(u64);
1291 	}
1292 
1293 	/* pad to a 4byte alignment for STAT */
1294 	if (status_padding > 0) {
1295 		flow_log("  STAT: padding to 4 byte alignment: %u bytes\n",
1296 			 status_padding);
1297 
1298 		memset(ptr, 0, status_padding);
1299 		ptr += status_padding;
1300 	}
1301 }
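
/*
 * Worked example (editor's addition): the hash-pad branch follows the usual
 * MD5/SHA rule of a 0x80 terminator, zero fill, then the message length in
 * bits. With total_sent = 3 and hash_pad_len = 61 (a SHA-2 style 64-byte
 * block), the pad written is
 *
 *	0x80, 52 zero bytes, then the 8-byte big-endian value 0x18
 *
 * since 3 bytes * 8 = 24 = 0x18 bits. For MD5 the same length is stored
 * little-endian. On SPU2 this branch is normally idle because
 * spu2_hash_pad_len() returns 0 and the hardware does the padding itself.
 */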
1302 
1303 /**
1304  * spu2_xts_tweak_in_payload() - Indicate that SPU2 does NOT place the XTS
1305  * tweak field in the packet payload (it uses IV instead)
1306  *
1307  * Return: 0
1308  */
1309 u8 spu2_xts_tweak_in_payload(void)
1310 {
1311 	return 0;
1312 }
1313 
1314 /**
1315  * spu2_tx_status_len() - Return the length of the STATUS field in a SPU
1316  * response message.
1317  *
1318  * Return: Length of STATUS field in bytes.
1319  */
1320 u8 spu2_tx_status_len(void)
1321 {
1322 	return SPU2_TX_STATUS_LEN;
1323 }
1324 
1325 /**
1326  * spu2_rx_status_len() - Return the length of the STATUS field in a SPU
1327  * response message.
1328  *
1329  * Return: Length of STATUS field in bytes.
1330  */
1331 u8 spu2_rx_status_len(void)
1332 {
1333 	return SPU2_RX_STATUS_LEN;
1334 }
1335 
1336 /**
1337  * spu2_status_process() - Process the status from a SPU response message.
1338  * @statp:  start of STATUS word
1339  *
1340  * Return:  0 - if status is good and response should be processed
1341  *         !0 - status indicates an error and response is invalid
1342  */
1343 int spu2_status_process(u8 *statp)
1344 {
1345 	/* SPU2 status is 2 bytes by default - SPU2_RX_STATUS_LEN */
1346 	u16 status = le16_to_cpu(*(__le16 *)statp);
1347 
1348 	if (status == 0)
1349 		return 0;
1350 
1351 	flow_log("rx status is %#x\n", status);
1352 	if (status == SPU2_INVALID_ICV)
1353 		return SPU_INVALID_ICV;
1354 
1355 	return -EBADMSG;
1356 }
1357 
1358 /**
1359  * spu2_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
1360  *
1361  * @digestsize:		Digest size of this request
1362  * @cipher_parms:	(pointer to) cipher parameters, includes IV buf & IV len
1363  * @assoclen:		Length of AAD data
1364  * @chunksize:		length of input data to be sent in this req
1365  * @is_encrypt:		true if this is an output/encrypt operation
1366  * @is_esp:		true if this is an ESP / RFC4309 operation
1367  *
1368  */
1369 void spu2_ccm_update_iv(unsigned int digestsize,
1370 			struct spu_cipher_parms *cipher_parms,
1371 			unsigned int assoclen, unsigned int chunksize,
1372 			bool is_encrypt, bool is_esp)
1373 {
1374 	int L;  /* size of length field, in bytes */
1375 
1376 	/*
1377 	 * In RFC4309 mode, L is fixed at 4 bytes; otherwise, IV from
1378 	 * testmgr contains (L-1) in bottom 3 bits of first byte,
1379 	 * per RFC 3610.
1380 	 */
1381 	if (is_esp)
1382 		L = CCM_ESP_L_VALUE;
1383 	else
1384 		L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
1385 		      CCM_B0_L_PRIME_SHIFT) + 1;
1386 
1387 	/* SPU2 doesn't want these length bytes nor the first byte... */
1388 	cipher_parms->iv_len -= (1 + L);
1389 	memmove(cipher_parms->iv_buf, &cipher_parms->iv_buf[1],
1390 		cipher_parms->iv_len);
1391 }
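
/*
 * Worked example (editor's addition): with a 16-byte CCM IV from the crypto
 * API whose first byte carries (L - 1) = 2 in its low bits, L = 3, so the
 * flags byte and the trailing 3 length bytes are dropped: iv_len becomes
 * 16 - (1 + 3) = 12 and the 12-byte nonce is moved to the start of iv_buf,
 * which is the form SPU2 expects.
 */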
1392 
1393 /**
1394  * spu2_wordalign_padlen() - SPU2 does not require padding.
1395  * @data_size: length of data field in bytes
1396  *
1397  * Return: length of status field padding, in bytes (always 0 on SPU2)
1398  */
1399 u32 spu2_wordalign_padlen(u32 data_size)
1400 {
1401 	return 0;
1402 }
1403