xref: /linux/drivers/crypto/nx/nx-sha256.c (revision 827634added7f38b7d724cab1dccdb2b004c13c3)
/**
 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
#include <asm/byteorder.h>

#include "nx_csbcpb.h"
#include "nx.h"

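/*
 * Reset the hash state to the standard SHA-256 initial values (stored
 * big-endian, the word layout the NX unit produces and consumes) and map
 * sctx->state as the output buffer that will receive the running digest.
 */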
static int nx_sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	int len;
	int rc;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	memset(sctx, 0, sizeof *sctx);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);

	len = SHA256_DIGEST_SIZE;
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
				  &nx_ctx->op.outlen,
				  &len,
				  (u8 *) sctx->state,
				  NX_DS_SHA256);

	if (rc)
		goto out;

	sctx->state[0] = __cpu_to_be32(SHA256_H0);
	sctx->state[1] = __cpu_to_be32(SHA256_H1);
	sctx->state[2] = __cpu_to_be32(SHA256_H2);
	sctx->state[3] = __cpu_to_be32(SHA256_H3);
	sctx->state[4] = __cpu_to_be32(SHA256_H4);
	sctx->state[5] = __cpu_to_be32(SHA256_H5);
	sctx->state[6] = __cpu_to_be32(SHA256_H6);
	sctx->state[7] = __cpu_to_be32(SHA256_H7);
	sctx->count = 0;

out:
	return rc;
}

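/*
 * Buffer partial blocks in sctx->buf and hand only whole SHA-256 blocks to
 * the NX unit.  The intermediate digest is carried between h-calls in the
 * CSB/CPB (message_digest -> input_partial_digest), and the sg list limits
 * may force a single update to be split across several h-calls.
 */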
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	u64 to_process = 0, leftover, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into sctx->buf, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count += len;
		goto out;
	}

	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	do {
		/*
		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
		 * this update. This value is also restricted by the sg list
		 * limits.
		 */
		to_process = total - to_process;
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);

		if (buf_len) {
			data_len = buf_len;
			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
						  &nx_ctx->op.inlen,
						  &data_len,
						  (u8 *) sctx->buf,
						  NX_DS_SHA256);

			if (rc)
				goto out;
			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - buf_len;
		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
					  &nx_ctx->op.inlen,
					  &data_len,
					  (u8 *) data,
					  NX_DS_SHA256);

		if (rc)
			goto out;

		to_process = (data_len + buf_len);
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
		       csbcpb->cpb.sha256.message_digest,
		       SHA256_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha256_ops));

		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA256_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);

	sctx->count += len;
	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

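/*
 * Finalize: clear the INTERMEDIATE flag, set the total message length in
 * bits, hash whatever is left in sctx->buf and copy the resulting digest
 * into the caller's buffer.
 */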
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	unsigned long irq_flags;
	int rc;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count >= SHA256_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
				  &nx_ctx->op.inlen,
				  &len,
				  (u8 *) sctx->buf,
				  NX_DS_SHA256);

	if (rc)
		goto out;
	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA256_DIGEST_SIZE;
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
				  &nx_ctx->op.outlen,
				  &len,
				  out,
				  NX_DS_SHA256);

	if (rc)
		goto out;
	if (len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

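/*
 * Export/import simply copy the raw sha256_state, so a partially hashed
 * request can be suspended and resumed later.
 */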
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

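/* shash descriptor registered with the crypto API by the NX driver core */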
struct shash_alg nx_shash_sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init       = nx_sha256_init,
	.update     = nx_sha256_update,
	.final      = nx_sha256_final,
	.export     = nx_sha256_export,
	.import     = nx_sha256_import,
	.descsize   = sizeof(struct sha256_state),
	.statesize  = sizeof(struct sha256_state),
	.base       = {
		.cra_name        = "sha256",
		.cra_driver_name = "sha256-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = SHA256_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};