/***********************license start************************************
 * Copyright (c) 2003-2017 Cavium, Inc.
 * All rights reserved.
 *
 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
 *
 * This file is provided under the terms of the Cavium License (see below)
 * or under the terms of GNU General Public License, Version 2, as
 * published by the Free Software Foundation. When using or redistributing
 * this file, you may do so under either license.
 *
 * Cavium License: Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the following
 * conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 *  * Neither the name of Cavium Inc. nor the names of its contributors may be
 *    used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * This Software, including technical data, may be subject to U.S. export
 * control laws, including the U.S. Export Administration Act and its
 * associated regulations, and may be subject to export or import
 * regulations in other countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
 * WITH YOU.
 ***********************license end**************************************/

#include "zip_crypto.h"

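/*
 * Fill a zip_operation with the static defaults used for every request:
 * finish-flush, the default speed setting, an Adler checksum, and either
 * ZLIB format (auto Huffman coding) or LZS format depending on lzs_flag.
 */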
static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
				    int lzs_flag)
{
	zip_ops->flush = ZIP_FLUSH_FINISH;

	/* equivalent to level 6 of opensource zlib */
	zip_ops->speed = 1;

	if (!lzs_flag) {
		zip_ops->ccode = 0; /* Auto Huffman */
		zip_ops->lzs_flag = 0;
		zip_ops->format = ZLIB_FORMAT;
	} else {
		zip_ops->ccode = 3; /* LZS Encoding */
		zip_ops->lzs_flag = 1;
		zip_ops->format = LZS_FORMAT;
	}
	zip_ops->begin_file = 1;
	zip_ops->history_len = 0;
	zip_ops->end_file = 1;
	zip_ops->compcode = 0;
	zip_ops->csum = 1; /* Adler checksum desired */
}

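/*
 * Set up the per-context compress and decompress operations and allocate
 * their fixed-size input/output buffers.  Buffers already allocated are
 * freed again on failure and -ENOMEM is returned.
 */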
static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
{
	struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
	struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;

	zip_static_init_zip_ops(comp_ctx, lzs_flag);
	zip_static_init_zip_ops(decomp_ctx, lzs_flag);

	comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
	if (!comp_ctx->input)
		return -ENOMEM;

	comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
	if (!comp_ctx->output)
		goto err_comp_input;

	decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
	if (!decomp_ctx->input)
		goto err_comp_output;

	decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
	if (!decomp_ctx->output)
		goto err_decomp_input;

	return 0;

err_decomp_input:
	zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);

err_comp_output:
	zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);

err_comp_input:
	zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);

	return -ENOMEM;
}

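/* Free the input/output buffers allocated by zip_ctx_init(). */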
static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
{
	struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
	struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;

	zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
	zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);

	zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
	zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
}

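/*
 * Compress slen bytes from src into dst.  The data is copied into the
 * context's preallocated input buffer, handed to the ZIP engine through
 * zip_deflate(), and on success the result is copied back to dst with
 * *dlen updated to the compressed length.
 */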
static int zip_compress(const u8 *src, unsigned int slen,
			u8 *dst, unsigned int *dlen,
			struct zip_kernel_ctx *zip_ctx)
{
	struct zip_operation *zip_ops = NULL;
	struct zip_state *zip_state;
	struct zip_device *zip = NULL;
	int ret;

	if (!zip_ctx || !src || !dst || !dlen)
		return -ENOMEM;

	zip = zip_get_device(zip_get_node_id());
	if (!zip)
		return -ENODEV;

	zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
	if (!zip_state)
		return -ENOMEM;

	zip_ops = &zip_ctx->zip_comp;

	zip_ops->input_len = slen;
	zip_ops->output_len = *dlen;
	memcpy(zip_ops->input, src, slen);

	ret = zip_deflate(zip_ops, zip_state, zip);

	if (!ret) {
		*dlen = zip_ops->output_len;
		memcpy(dst, zip_ops->output, *dlen);
	}
	kfree(zip_state);
	return ret;
}

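/*
 * Decompress slen bytes from src into dst, mirroring zip_compress() but
 * using zip_inflate().  For zlib-format (non-LZS) input an extra zero byte
 * is appended as a workaround (see the comment below).
 */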
static int zip_decompress(const u8 *src, unsigned int slen,
			  u8 *dst, unsigned int *dlen,
			  struct zip_kernel_ctx *zip_ctx)
{
	struct zip_operation *zip_ops = NULL;
	struct zip_state *zip_state;
	struct zip_device *zip = NULL;
	int ret;

	if (!zip_ctx || !src || !dst || !dlen)
		return -ENOMEM;

	zip = zip_get_device(zip_get_node_id());
	if (!zip)
		return -ENODEV;

	zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
	if (!zip_state)
		return -ENOMEM;

	zip_ops = &zip_ctx->zip_decomp;
	memcpy(zip_ops->input, src, slen);

	/* Workaround for a bug in zlib which sometimes needs an extra byte */
	if (zip_ops->ccode != 3) /* Not LZS Encoding */
		zip_ops->input[slen++] = 0;

	zip_ops->input_len = slen;
	zip_ops->output_len = *dlen;

	ret = zip_inflate(zip_ops, zip_state, zip);

	if (!ret) {
		*dlen = zip_ops->output_len;
		memcpy(dst, zip_ops->output, *dlen);
	}
	kfree(zip_state);
	return ret;
}

/* Legacy Compress framework start */
int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
{
	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);

	return zip_ctx_init(zip_ctx, 0);
}

int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
{
	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);

	return zip_ctx_init(zip_ctx, 1);
}

void zip_free_comp_ctx(struct crypto_tfm *tfm)
{
	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);

	zip_ctx_exit(zip_ctx);
}

int zip_comp_compress(struct crypto_tfm *tfm,
		      const u8 *src, unsigned int slen,
		      u8 *dst, unsigned int *dlen)
{
	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);

	return zip_compress(src, slen, dst, dlen, zip_ctx);
}

int zip_comp_decompress(struct crypto_tfm *tfm,
			const u8 *src, unsigned int slen,
			u8 *dst, unsigned int *dlen)
{
	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);

	return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* Legacy compress framework end */

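/*
 * The legacy hooks above are expected to be plugged into the kernel crypto
 * API elsewhere in the driver.  A minimal registration sketch, assuming the
 * standard "deflate" algorithm name and a purely illustrative driver name,
 * might look like this (registered with crypto_register_alg()):
 *
 *	static struct crypto_alg zip_comp_deflate = {
 *		.cra_name	 = "deflate",
 *		.cra_driver_name = "deflate-cavium",
 *		.cra_flags	 = CRYPTO_ALG_TYPE_COMPRESS,
 *		.cra_ctxsize	 = sizeof(struct zip_kernel_ctx),
 *		.cra_module	 = THIS_MODULE,
 *		.cra_init	 = zip_alloc_comp_ctx_deflate,
 *		.cra_exit	 = zip_free_comp_ctx,
 *		.cra_u		 = { .compress = {
 *			.coa_compress	= zip_comp_compress,
 *			.coa_decompress	= zip_comp_decompress } }
 *	};
 */
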
/* SCOMP framework start */
void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
{
	int ret;
	struct zip_kernel_ctx *zip_ctx;

	zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
	if (!zip_ctx)
		return ERR_PTR(-ENOMEM);

	ret = zip_ctx_init(zip_ctx, 0);

	if (ret) {
		kfree_sensitive(zip_ctx);
		return ERR_PTR(ret);
	}

	return zip_ctx;
}

void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
{
	int ret;
	struct zip_kernel_ctx *zip_ctx;

	zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
	if (!zip_ctx)
		return ERR_PTR(-ENOMEM);

	ret = zip_ctx_init(zip_ctx, 1);

	if (ret) {
		kfree_sensitive(zip_ctx);
		return ERR_PTR(ret);
	}

	return zip_ctx;
}

void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx)
{
	struct zip_kernel_ctx *zip_ctx = ctx;

	zip_ctx_exit(zip_ctx);
	kfree_sensitive(zip_ctx);
}

int zip_scomp_compress(struct crypto_scomp *tfm,
		       const u8 *src, unsigned int slen,
		       u8 *dst, unsigned int *dlen, void *ctx)
{
	struct zip_kernel_ctx *zip_ctx = ctx;

	return zip_compress(src, slen, dst, dlen, zip_ctx);
}

int zip_scomp_decompress(struct crypto_scomp *tfm,
			 const u8 *src, unsigned int slen,
			 u8 *dst, unsigned int *dlen, void *ctx)
{
	struct zip_kernel_ctx *zip_ctx = ctx;

	return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* SCOMP framework end */

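/*
 * Likewise, the SCOMP hooks above are expected to be registered with the
 * synchronous compression framework elsewhere in the driver.  A minimal
 * sketch, again with an illustrative driver name, might look like this
 * (registered with crypto_register_scomp()):
 *
 *	static struct scomp_alg zip_scomp_deflate = {
 *		.alloc_ctx	= zip_alloc_scomp_ctx_deflate,
 *		.free_ctx	= zip_free_scomp_ctx,
 *		.compress	= zip_scomp_compress,
 *		.decompress	= zip_scomp_decompress,
 *		.base		= {
 *			.cra_name	 = "deflate",
 *			.cra_driver_name = "deflate-scomp-cavium",
 *			.cra_module	 = THIS_MODULE,
 *		}
 *	};
 */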