xref: /linux/drivers/crypto/cavium/cpt/cptvf_algs.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <crypto/des.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include "cptvf.h"
#include "cptvf_algs.h"

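/*
 * Table of the CPT VF devices probed so far. Requests are dispatched to
 * the VF indexed by the submitting CPU (see cvm_enc_dec()).
 */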
struct cpt_device_handle {
	void *cdev[MAX_DEVICES];
	u32 dev_count;
};

static struct cpt_device_handle dev_handle;

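/*
 * Completion callback registered with each request; invoked once the
 * hardware has finished processing it.
 */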
static void cvm_callback(u32 status, void *arg)
{
	struct crypto_async_request *req = (struct crypto_async_request *)arg;

	req->complete(req, !status);
}

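/*
 * The update_*() helpers below each append one entry to the CPT gather
 * (input) or scatter (output) list; *argcnt tracks the next free slot,
 * and the running dlen/rlen totals are updated accordingly.
 */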
static inline void update_input_iv(struct cpt_request_info *req_info,
				   u8 *iv, u32 enc_iv_len,
				   u32 *argcnt)
{
	/* Set the IV information */
	req_info->in[*argcnt].vptr = (void *)iv;
	req_info->in[*argcnt].size = enc_iv_len;
	req_info->req.dlen += enc_iv_len;

	++(*argcnt);
}

static inline void update_output_iv(struct cpt_request_info *req_info,
				    u8 *iv, u32 enc_iv_len,
				    u32 *argcnt)
{
	/* Reserve space for the IV the hardware writes back */
	req_info->out[*argcnt].vptr = (void *)iv;
	req_info->out[*argcnt].size = enc_iv_len;
	req_info->rlen += enc_iv_len;

	++(*argcnt);
}

static inline void update_input_data(struct cpt_request_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, inp_sg->length);
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;

		++(*argcnt);
		++inp_sg;
	}
}

static inline void update_output_data(struct cpt_request_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 nbytes, u32 *argcnt)
{
	req_info->rlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, outp_sg->length);
		u8 *ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *)ptr;
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		++outp_sg;
	}
}

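/*
 * Build the fixed header that precedes the data in the gather list: the
 * 8-byte offset control word followed by the fc_context, which carries
 * the cipher type, key type, key material and IV source.
 */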
static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
				 u32 cipher_type, u32 aes_key_type,
				 u32 *argcnt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct fc_context *fctx = &rctx->fctx;
	u64 *offset_control = &rctx->control_word;
	u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	u64 *ctrl_flags = NULL;

	req_info->ctrl.s.grp = 0;
	req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = SE_CORE_REQ;

	req_info->req.opcode.s.major = MAJOR_OP_FC |
					DMA_MODE_FLAG(DMA_GATHER_SCATTER);
	if (enc)
		req_info->req.opcode.s.minor = 2;
	else
		req_info->req.opcode.s.minor = 3;

	req_info->req.param1 = req->nbytes; /* Encryption data length */
	req_info->req.param2 = 0; /* Auth data length */

	fctx->enc.enc_ctrl.e.enc_cipher = cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = aes_key_type;
	fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;

	/* XTS carries two keys stored back to back in enc_key */
	if (cipher_type == AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
	ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = cpu_to_be64(*ctrl_flags);

	*offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
	/* Store the packet data information in the first 8 bytes of the
	 * input, the offset control word
	 */
	req_info->in[*argcnt].vptr = (u8 *)offset_control;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct fc_context);
	req_info->req.dlen += sizeof(struct fc_context);

	++(*argcnt);

	return 0;
}

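/*
 * Gather (input) list layout, in order:
 * [ offset control word ][ fc_context ][ IV ][ source data ]
 */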
static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc,
				    u32 cipher_type, u32 aes_key_type,
				    u32 enc_iv_len)
{
	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	create_ctx_hdr(req, enc, cipher_type, aes_key_type, &argcnt);
	update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
	update_input_data(req_info, req->src, req->nbytes, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline void store_cb_info(struct ablkcipher_request *req,
				 struct cpt_request_info *req_info)
{
	req_info->callback = (void *)cvm_callback;
	req_info->callback_arg = (void *)&req->base;
}

static inline void create_output_list(struct ablkcipher_request *req,
				      u32 cipher_type,
				      u32 enc_iv_len)
{
	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/* OUTPUT buffer processing:
	 * The AES encryption/decryption output is received in
	 * the following format:
	 *
	 * |------- IV -------|---- ENCRYPTED/DECRYPTED DATA ----|
	 * |     16 bytes     |  request enc/dec data length     |
	 */
	/* Read back the IV information */
	update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
	update_output_data(req_info, req->dst, req->nbytes, &argcnt);
	req_info->outcnt = argcnt;
}

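/*
 * Common encrypt/decrypt path: map the key length to a hardware key
 * type, build the gather/scatter lists, and submit the request to the
 * CPT VF associated with the current CPU.
 */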
static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc,
			      u32 cipher_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 key_type = AES_128_BIT;
	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
	u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
	struct fc_context *fctx = &rctx->fctx;
	struct cpt_request_info *req_info = &rctx->cpt_req;
	void *cdev = NULL;
	int status;

	switch (ctx->key_len) {
	case 16:
		key_type = AES_128_BIT;
		break;
	case 24:
		key_type = AES_192_BIT;
		break;
	case 32:
		/* XTS carries two half-keys, so 32 bytes means AES-128 */
		if (cipher_type == AES_XTS)
			key_type = AES_128_BIT;
		else
			key_type = AES_256_BIT;
		break;
	case 64:
		if (cipher_type == AES_XTS)
			key_type = AES_256_BIT;
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* The AES key type is not used for DES3 */
	if (cipher_type == DES3_CBC)
		key_type = 0;

	memset(req_info, 0, sizeof(struct cpt_request_info));
	memset(fctx, 0, sizeof(struct fc_context));
	create_input_list(req, enc, cipher_type, key_type, enc_iv_len);
	create_output_list(req, cipher_type, enc_iv_len);
	store_cb_info(req, req_info);
	cdev = dev_handle.cdev[smp_processor_id()];
	status = cptvf_do_request(cdev, req_info);
	/* The send is asynchronous; once the request completes, the
	 * driver notifies us through the registered callback.
	 */

	if (status)
		return status;
	else
		return -EINPROGRESS;
}

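/*
 * Entry points wired into the crypto_alg table below. Each one simply
 * tags the request with its direction and cipher type.
 */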
int cvm_des3_encrypt_cbc(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, true, DES3_CBC);
}

int cvm_des3_decrypt_cbc(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, false, DES3_CBC);
}

int cvm_aes_encrypt_xts(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, true, AES_XTS);
}

int cvm_aes_decrypt_xts(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, false, AES_XTS);
}

int cvm_aes_encrypt_cbc(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, true, AES_CBC);
}

int cvm_aes_decrypt_cbc(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, false, AES_CBC);
}

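/*
 * XTS setkey: the supplied key is the concatenation of two equal-sized
 * keys. Store key1 at the start of enc_key and key2 at KEY2_OFFSET.
 */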
int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		   u32 keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;
	const u8 *key1 = key;
	const u8 *key2 = key + (keylen / 2);

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;
	ctx->key_len = keylen;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);

	return 0;
}

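/*
 * Common setkey for the CBC modes. The accepted lengths are the AES key
 * sizes; DES3-EDE's 24-byte key passes the same check.
 */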
int cvm_enc_dec_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		       u32 keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
		ctx->key_len = keylen;
		memcpy(ctx->enc_key, key, keylen);
		return 0;
	}
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);

	return -EINVAL;
}

int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	/* Additional memory for an ablkcipher_request is allocated
	 * because the cryptd daemon uses this memory for its own
	 * request_ctx information.
	 */
	tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
					sizeof(struct ablkcipher_request);

	return 0;
}

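/*
 * Algorithms advertised to the crypto API. The high cra_priority (4001)
 * makes these implementations preferred over the generic software ones
 * whenever the CPT VFs are available. A consumer selects them by name,
 * e.g. (sketch only, not part of this driver):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 */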
static struct crypto_alg algs[] = { {
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "xts(aes)",
	.cra_driver_name = "cavium-xts-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.setkey = cvm_xts_setkey,
			.encrypt = cvm_aes_encrypt_xts,
			.decrypt = cvm_aes_decrypt_xts,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cavium-cbc-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = cvm_enc_dec_setkey,
			.encrypt = cvm_aes_encrypt_cbc,
			.decrypt = cvm_aes_decrypt_cbc,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "cavium-cbc-des3_ede",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = cvm_enc_dec_setkey,
			.encrypt = cvm_des3_encrypt_cbc,
			.decrypt = cvm_des3_decrypt_cbc,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
} };

static inline int cav_register_algs(void)
{
	return crypto_register_algs(algs, ARRAY_SIZE(algs));
}

static inline void cav_unregister_algs(void)
{
	crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}

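/*
 * Called once per probed VF. The algorithms are registered only once,
 * after the fourth VF (dev_count == 3) has been added to the table.
 */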
int cvm_crypto_init(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 dev_count;

	dev_count = dev_handle.dev_count;
	dev_handle.cdev[dev_count] = cptvf;
	dev_handle.dev_count++;

	if (dev_count == 3) {
		if (cav_register_algs()) {
			dev_err(&pdev->dev, "Error in registering crypto algorithms\n");
			return -EINVAL;
		}
	}

	return 0;
}

void cvm_crypto_exit(void)
{
	u32 dev_count;

	dev_count = --dev_handle.dev_count;
	if (!dev_count)
		cav_unregister_algs();
}