/* xref: /linux/crypto/scompress.c (revision 5a06ef1f8da226b2de587e22c17f88b72cede3be) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

#define SCOMP_SCRATCH_SIZE 65400

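/*
 * Per-CPU scratch space used to linearize data that cannot be mapped
 * directly: one lowmem page for the source and an SCOMP_SCRATCH_SIZE
 * vmalloc buffer for the destination, serialized by ->lock.
 */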
struct scomp_scratch {
	spinlock_t	lock;
	union {
		void	*src;
		unsigned long saddr;
	};
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

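/*
 * Free the per-CPU scratch buffers.  Safe on a partially allocated
 * set, since free_page() and vfree() both tolerate NULL/0.
 */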
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		free_page(scratch->saddr);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

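/*
 * Allocate the scratch buffers on each possible CPU's memory node so
 * the bounce copies stay NUMA-local.
 */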
static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		struct page *page;
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
		if (!page)
			goto error;
		scratch->src = page_address(page);
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

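/*
 * Free the per-CPU streams of an algorithm.  The percpu area is
 * zeroed on allocation, so the first NULL ctx marks the end of the
 * populated entries.
 */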
static void scomp_free_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream = alg->stream;
	int i;

	/* May run before any stream was ever allocated. */
	alg->stream = NULL;
	if (!stream)
		return;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		if (!ps->ctx)
			break;

		alg->free_ctx(ps->ctx);
	}

	free_percpu(stream);
}

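/*
 * Allocate one stream (lock + algorithm-private context) per possible
 * CPU, so (de)compression can run concurrently across CPUs.
 */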
static int scomp_alloc_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream;
	int i;

	stream = alloc_percpu(struct crypto_acomp_stream);
	if (!stream)
		return -ENOMEM;

	/* Publish the zeroed area so the error path can free it. */
	alg->stream = stream;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
		void *ctx;

		ctx = alg->alloc_ctx();
		if (IS_ERR(ctx)) {
			/* ps->ctx is still NULL and ends the cleanup loop. */
			scomp_free_streams(alg);
			return PTR_ERR(ctx);
		}

		ps->ctx = ctx;
		spin_lock_init(&ps->lock);
	}

	return 0;
}

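/*
 * Lazily set up shared state on first use: per-algorithm streams for
 * the first tfm of this algorithm, scratch buffers for the first
 * scomp tfm overall.  scomp_lock serializes both against teardown.
 */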
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!alg->stream) {
		ret = scomp_alloc_streams(alg);
		if (ret)
			goto unlock;
	}
	if (!scomp_scratch_users) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			goto unlock;
	}
	/* Count every tfm so the exit-path decrement balances. */
	scomp_scratch_users++;
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

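/*
 * Synchronously (de)compress a request: dir != 0 compresses, dir == 0
 * decompresses.  Virtual-address requests are used in place; sg/folio
 * data is kmapped when it either fits in one page or sits entirely in
 * lowmem, and is bounced through the per-CPU scratch space otherwise.
 */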
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int n;
	const u8 *src;
	size_t soff;
	size_t doff;
	u8 *dst;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

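	/*
	 * Pick the source buffer: the caller's virtual address, a
	 * direct kmap of the folio/sg page, or, if neither applies,
	 * the scratch page (filled under the scratch lock below).
	 */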
	if (acomp_request_src_isvirt(req))
		src = req->svirt;
	else {
		src = scratch->src;
		do {
			if (acomp_request_src_isfolio(req)) {
				spage = folio_page(req->sfolio, 0);
				soff = req->soff;
			} else if (slen <= req->src->length) {
				spage = sg_page(req->src);
				soff = req->src->offset;
			} else
				break;

			spage = nth_page(spage, soff / PAGE_SIZE);
			soff = offset_in_page(soff);

			n = slen / PAGE_SIZE;
			n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
			if (PageHighMem(nth_page(spage, n)) &&
			    size_add(soff, slen) > PAGE_SIZE)
				break;
			src = kmap_local_page(spage) + soff;
		} while (0);
	}

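	/*
	 * Likewise for the destination, except that bouncing through
	 * scratch caps the output at SCOMP_SCRATCH_SIZE.
	 */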
	if (acomp_request_dst_isvirt(req))
		dst = req->dvirt;
	else {
		unsigned int max = SCOMP_SCRATCH_SIZE;

		dst = scratch->dst;
		do {
			if (acomp_request_dst_isfolio(req)) {
				dpage = folio_page(req->dfolio, 0);
				doff = req->doff;
			} else if (dlen <= req->dst->length) {
				dpage = sg_page(req->dst);
				doff = req->dst->offset;
			} else
				break;

			dpage = nth_page(dpage, doff / PAGE_SIZE);
			doff = offset_in_page(doff);

			n = dlen / PAGE_SIZE;
			n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
			if (PageHighMem(nth_page(dpage, n)) &&
			    size_add(doff, dlen) > PAGE_SIZE)
				break;
			dst = kmap_local_page(dpage) + doff;
			max = dlen;
		} while (0);
		dlen = min(dlen, max);
	}

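	/*
	 * scratch->lock guards the per-CPU bounce buffers (taken _bh
	 * so softirq users on this CPU cannot deadlock against us);
	 * stream->lock guards the per-CPU algorithm context.
	 */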
	spin_lock_bh(&scratch->lock);

	if (src == scratch->src)
		memcpy_from_sglist(scratch->src, req->src, 0, slen);

	stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
	spin_lock(&stream->lock);
	if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	if (dst == scratch->dst)
		memcpy_to_sglist(req->dst, 0, dst, dlen);

	spin_unlock(&stream->lock);
	spin_unlock_bh(&scratch->lock);

	req->dlen = dlen;

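	/*
	 * For a kmapped destination, drop the mapping and flush the
	 * cache of every page that received output.
	 */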
	if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) {
		kunmap_local(dst);
		dlen += doff;
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage = nth_page(dpage, 1);
		}
	}
	if (!acomp_request_src_isvirt(req) && src != scratch->src)
		kunmap_local(src);

	return ret;
}

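/*
 * Process a request and any requests chained behind it.  Every
 * request records its own error; the head request's result is
 * returned.
 */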
static int scomp_acomp_chain(struct acomp_req *req, int dir)
{
	struct acomp_req *r2;
	int err;

	err = scomp_acomp_comp_decomp(req, dir);
	req->base.err = err;

	list_for_each_entry(r2, &req->base.list, base.list)
		r2->base.err = scomp_acomp_comp_decomp(r2, dir);

	return err;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

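/*
 * Glue for exposing an scomp algorithm through the acomp API: create
 * the underlying scomp tfm and wire the acomp entry points to the
 * synchronous implementations above.
 */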
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	scomp_free_streams(__crypto_scomp_alg(alg));
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

static void scomp_prepare_alg(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
}

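/*
 * Register a synchronous compression algorithm.  A typical caller
 * fills in a struct scomp_alg and registers it; illustrative sketch
 * only, the foo_* names are hypothetical:
 *
 *	static struct scomp_alg foo_alg = {
 *		.alloc_ctx	= foo_alloc_ctx,
 *		.free_ctx	= foo_free_ctx,
 *		.compress	= foo_compress,
 *		.decompress	= foo_decompress,
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-scomp",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	err = crypto_register_scomp(&foo_alg);
 */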
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	scomp_prepare_alg(alg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

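/*
 * Register an array of algorithms, unwinding already registered
 * entries in reverse order on failure.
 */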
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");