/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// This file is dual-licensed, meaning that you can use it under your
// choice of either of the following two licenses:
//
// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License 2.0 (the "License"). You can obtain
// a copy in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
//
// or
//
// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
// Copyright 2024 Google LLC
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The generated code of this file depends on the following RISC-V extensions:
// - RV64I
// - RISC-V Vector ('V') with VLEN >= 128
// - RISC-V Vector AES block cipher extension ('Zvkned')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')

#include <linux/linkage.h>

.text
.option arch, +zvkned, +zvkb
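
// Note: the .option arch directive above only makes the assembler accept the
// Zvkned/Zvkb encodings; the caller is expected to have verified at runtime
// that the CPU actually implements these extensions before calling in here.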

#include "aes-macros.S"

#define KEYP		a0
#define INP		a1
#define OUTP		a2
#define LEN		a3
#define IVP		a4

#define LEN32		a5
#define VL_E32		a6
#define VL_BLOCKS	a7
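
// Loop state maintained by aes_ctr32_crypt below: LEN32 is the remaining
// length in 32-bit words (with blocks rounded up), VL_E32 is the vl chosen
// for the current iteration in 32-bit words, and VL_BLOCKS is that same
// count expressed in 128-bit blocks.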

.macro	aes_ctr32_crypt	keylen
	// LEN32 = the data length in 32-bit words, with the number of blocks
	// rounded up: ceil(LEN / 16) blocks of 4 words each.
	addi		t0, LEN, 15
	srli		t0, t0, 4
	slli		LEN32, t0, 2

	// Create a mask that selects the last 32-bit word of each 128-bit
	// block.  This is the word that contains the (big-endian) counter.
	li		t0, 0x88
	vsetvli		t1, zero, e8, m1, ta, ma
	vmv.v.x		v0, t0
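	// (0x88 = 0b10001000: read as a mask for e32 elements, bit i of v0
	// governs element i, so the set bits select elements 3, 7, 11, ...,
	// i.e. the last word of each 4-word block.)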

	// Load the IV into v31.  The last 32-bit word contains the counter.
	vsetivli	zero, 4, e32, m1, ta, ma
	vle32.v		v31, (IVP)

	// Convert the big-endian counter into little-endian.
	vsetivli	zero, 4, e32, m1, ta, mu
	vrev8.v		v31, v31, v0.t
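	// (vrev8.v byte-swaps each element; with mask v0.t and the "mu"
	// policy, only the counter word is swapped and the rest of the IV is
	// left unchanged.)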

	// Splat the IV to v16 (with LMUL=4).  The number of copies is the
	// maximum number of blocks that will be processed per iteration.
	vsetvli		zero, LEN32, e32, m4, ta, ma
	vmv.v.i		v16, 0
	vaesz.vs	v16, v31
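	// (vaesz.vs XORs element group 0 of vs2 into every element group of
	// vd, so with v16 zeroed first it acts as a 128-bit broadcast of the
	// IV across the LMUL=4 register group.)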

	// v20 = [x, x, x, 0, x, x, x, 1, ...]
	viota.m		v20, v0, v0.t
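	// (viota.m writes to each active element the count of mask bits set
	// before it; one mask bit is set per block, so block j receives j.)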
	// v16 = [IV0, IV1, IV2, counter+0, IV0, IV1, IV2, counter+1, ...]
	vsetvli		VL_E32, LEN32, e32, m4, ta, mu
	vadd.vv		v16, v16, v20, v0.t

	j 2f
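	// (Enter the loop at 2f: the counters for the first iteration were
	// already initialized above, so the per-iteration increment just
	// after the 1: label must be skipped on the first pass.)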
1:
	// Set the number of blocks to process in this iteration.  vl=VL_E32
	// is the length in 32-bit words, i.e. 4 times the number of blocks.
	vsetvli		VL_E32, LEN32, e32, m4, ta, mu

	// Increment the counters by the number of blocks processed in the
	// previous iteration.
	vadd.vx		v16, v16, VL_BLOCKS, v0.t
2:
	// Prepare the AES inputs into v24.
	vmv.v.v		v24, v16
	vrev8.v		v24, v24, v0.t	// Convert counters back to big-endian.

	// Encrypt the AES inputs to create the next portion of the keystream.
	aes_encrypt	v24, \keylen
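	// (aes_encrypt is defined in aes-macros.S; it runs the full AES round
	// sequence on v24 using the round keys that aes_begin loaded into
	// vector registers.)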

	// XOR the data with the keystream.
	vsetvli		t0, LEN, e8, m4, ta, ma
	vle8.v		v20, (INP)
	vxor.vv		v20, v20, v24
	vse8.v		v20, (OUTP)
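	// (Setting vl from LEN in bytes transparently handles a partial final
	// block: only LEN bytes are loaded, XORed, and stored, and the unused
	// tail of the keystream is discarded.)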

	// Advance the pointers and update the remaining length.
	add		INP, INP, t0
	add		OUTP, OUTP, t0
	sub		LEN, LEN, t0
	sub		LEN32, LEN32, VL_E32
	srli		VL_BLOCKS, VL_E32, 2
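	// (VL_BLOCKS = number of blocks just processed; it feeds the counter
	// increment after the 1: label and the final IV update below.)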

	// Repeat if more data remains.
	bnez		LEN, 1b

	// Update *IVP to contain the next counter.
	vsetivli	zero, 4, e32, m1, ta, mu
	vadd.vx		v16, v16, VL_BLOCKS, v0.t
	vrev8.v		v16, v16, v0.t	// Convert counters back to big-endian.
	vse32.v		v16, (IVP)

	ret
.endm

// void aes_ctr32_crypt_zvkned_zvkb(const struct crypto_aes_ctx *key,
//				    const u8 *in, u8 *out, size_t len,
//				    u8 iv[16]);
SYM_FUNC_START(aes_ctr32_crypt_zvkned_zvkb)
	aes_begin	KEYP, 128f, 192f
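	// (aes_begin, from aes-macros.S, loads the round keys and branches to
	// 128f or 192f for AES-128 or AES-192; AES-256 falls through.  Each
	// aes_ctr32_crypt expansion ends in its own ret, so the three cases
	// never fall into each other.)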
	aes_ctr32_crypt	256
128:
	aes_ctr32_crypt	128
192:
	aes_ctr32_crypt	192
SYM_FUNC_END(aes_ctr32_crypt_zvkned_zvkb)
147