xref: /linux/crypto/scompress.c (revision 184e56e77c06a7eef68a021e9d4b11a11a8ab096)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cpumask.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>

#include "compress.h"

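/*
 * Per-CPU scratch page used to linearise source data that cannot be
 * mapped directly (e.g. a highmem SG entry whose data crosses a page
 * boundary).  Pages are allocated lazily: a CPU that finds its scratch
 * missing sets its bit in scomp_scratch_want and kicks
 * scomp_scratch_work to allocate one in process context.
 */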
struct scomp_scratch {
	spinlock_t	lock;
	union {
		void	*src;
		unsigned long saddr;
	};
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static cpumask_t scomp_scratch_want;
static void scomp_scratch_workfn(struct work_struct *work);
static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

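/*
 * Free every per-CPU scratch page.  free_page() ignores a zero address,
 * so CPUs whose lazy allocation never ran are safe to visit here.
 */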
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		free_page(scratch->saddr);
		scratch->src = NULL;
	}
}

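/* Allocate a single scratch page on the NUMA node that owns @cpu. */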
static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *page;

	page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!page)
		return -ENOMEM;
	spin_lock_bh(&scratch->lock);
	scratch->src = page_address(page);
	spin_unlock_bh(&scratch->lock);
	return 0;
}

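/*
 * Deferred allocator: give each CPU flagged in scomp_scratch_want a
 * scratch page.  An allocation failure ends this pass; the bit stays
 * set, so the next miss in scomp_lock_scratch() reschedules the work.
 */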
static void scomp_scratch_workfn(struct work_struct *work)
{
	int cpu;

	for_each_cpu(cpu, &scomp_scratch_want) {
		struct scomp_scratch *scratch;

		scratch = per_cpu_ptr(&scomp_scratch, cpu);
		if (scratch->src)
			continue;
		if (scomp_alloc_scratch(scratch, cpu))
			break;

		cpumask_clear_cpu(cpu, &scomp_scratch_want);
	}
}

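/*
 * Pre-allocate a scratch page for the first possible CPU only; every
 * other CPU gets its page on demand via scomp_scratch_workfn().
 */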
static int crypto_scomp_alloc_scratches(void)
{
	unsigned int i = cpumask_first(cpu_possible_mask);
	struct scomp_scratch *scratch;

	scratch = per_cpu_ptr(&scomp_scratch, i);
	return scomp_alloc_scratch(scratch, i);
}

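/*
 * Allocate the per-algorithm stream contexts and, for the first scomp
 * user in the system, the scratch pages.  scomp_scratch_users is a
 * plain reference count serialised by scomp_lock.
 */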
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	ret = crypto_acomp_alloc_streams(&alg->streams);
	if (ret)
		goto unlock;
	if (!scomp_scratch_users++) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			scomp_scratch_users--;
	}
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

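/*
 * Return a locked scratch.  Prefer the current CPU's page; if that has
 * not been allocated yet, ask the workqueue for it and fall back to
 * the scratch of the first possible CPU, which always exists.
 */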
static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
{
	int cpu = raw_smp_processor_id();
	struct scomp_scratch *scratch;

	scratch = per_cpu_ptr(&scomp_scratch, cpu);
	spin_lock(&scratch->lock);
	if (likely(scratch->src))
		return scratch;
	spin_unlock(&scratch->lock);

	cpumask_set_cpu(cpu, &scomp_scratch_want);
	schedule_work(&scomp_scratch_work);

	scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
	spin_lock(&scratch->lock);
	return scratch;
}

static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
	__releases(scratch)
{
	spin_unlock(&scratch->lock);
}

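/*
 * Common body for compression (dir != 0) and decompression (dir == 0).
 *
 * Virtual-address requests are used as-is.  SG and folio requests are
 * mapped with kmap_local_page(), which only works if the data does not
 * straddle a page boundary in highmem: an unmappable destination fails
 * with -ENOSYS, while an unmappable source is bounced through the
 * per-CPU scratch page instead (the inner 'src' below shadows the
 * outer NULL so that the final kunmap_local() is skipped).
 */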
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	bool src_isvirt = acomp_request_src_isvirt(req);
	bool dst_isvirt = acomp_request_dst_isvirt(req);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	struct scomp_scratch *scratch;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int n;
	const u8 *src;
	size_t soff;
	size_t doff;
	u8 *dst;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

	if (dst_isvirt)
		dst = req->dvirt;
	else {
		if (acomp_request_dst_isfolio(req)) {
			dpage = folio_page(req->dfolio, 0);
			doff = req->doff;
		} else if (dlen <= req->dst->length) {
			dpage = sg_page(req->dst);
			doff = req->dst->offset;
		} else
			return -ENOSYS;

		dpage = nth_page(dpage, doff / PAGE_SIZE);
		doff = offset_in_page(doff);
		n = (dlen - 1) / PAGE_SIZE;
		n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
		if (PageHighMem(nth_page(dpage, n)) &&
		    size_add(doff, dlen) > PAGE_SIZE)
			return -ENOSYS;
		dst = kmap_local_page(dpage) + doff;
	}

	if (src_isvirt)
		src = req->svirt;
	else {
		src = NULL;
		do {
			if (acomp_request_src_isfolio(req)) {
				spage = folio_page(req->sfolio, 0);
				soff = req->soff;
			} else if (slen <= req->src->length) {
				spage = sg_page(req->src);
				soff = req->src->offset;
			} else
				break;

			spage = nth_page(spage, soff / PAGE_SIZE);
			soff = offset_in_page(soff);

			n = (slen - 1) / PAGE_SIZE;
			n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
			if (PageHighMem(nth_page(spage, n)) &&
			    size_add(soff, slen) > PAGE_SIZE)
				break;
			src = kmap_local_page(spage) + soff;
		} while (0);
	}

	stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);

	if (!src_isvirt && !src) {
		const u8 *src;

		scratch = scomp_lock_scratch();
		src = scratch->src;
		memcpy_from_sglist(scratch->src, req->src, 0, slen);

		if (dir)
			ret = crypto_scomp_compress(scomp, src, slen,
						    dst, &dlen, stream->ctx);
		else
			ret = crypto_scomp_decompress(scomp, src, slen,
						      dst, &dlen, stream->ctx);

		scomp_unlock_scratch(scratch);
	} else if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	crypto_acomp_unlock_stream_bh(stream);

	req->dlen = dlen;

	if (!src_isvirt && src)
		kunmap_local(src);
	if (!dst_isvirt) {
		kunmap_local(dst);
		dlen += doff;
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage = nth_page(dpage, 1);
		}
	}

	return ret;
}

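/*
 * Run the request and, as the CRYPTO_ALG_REQ_CHAIN flag advertises,
 * every request linked behind it.  Each request records its own result
 * in base.err; the return value is that of the head of the chain.
 */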
static int scomp_acomp_chain(struct acomp_req *req, int dir)
{
	struct acomp_req *r2;
	int err;

	err = scomp_acomp_comp_decomp(req, dir);
	req->base.err = err;

	list_for_each_entry(r2, &req->base.list, base.list)
		r2->base.err = scomp_acomp_comp_decomp(r2, dir);

	return err;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 0);
}

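/*
 * Tear-down counterpart to crypto_init_scomp_ops_async(): flush any
 * pending scratch allocation before the last user frees the pages.
 */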
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	flush_work(&scomp_scratch_work);
	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

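/*
 * Glue an scomp algorithm behind the acomp interface: allocate the
 * underlying synchronous transform and route the acomp entry points
 * through scomp_acomp_chain().
 */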
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	struct scomp_alg *scomp = __crypto_scomp_alg(alg);

	crypto_acomp_free_streams(&scomp->streams);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

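/*
 * Fill in the fields shared with the other compression types and
 * advertise request chaining, implemented by scomp_acomp_chain() above.
 */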
static void scomp_prepare_alg(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
}

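/*
 * Backends call this from their module init.  A minimal sketch of a
 * registration (every "mycomp" name below is illustrative only, not
 * part of this file):
 *
 *	static struct scomp_alg mycomp_alg = {
 *		.alloc_ctx	= mycomp_alloc_ctx,
 *		.free_ctx	= mycomp_free_ctx,
 *		.compress	= mycomp_compress,
 *		.decompress	= mycomp_decompress,
 *		.base		= {
 *			.cra_name	 = "mycomp",
 *			.cra_driver_name = "mycomp-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init mycomp_mod_init(void)
 *	{
 *		return crypto_register_scomp(&mycomp_alg);
 *	}
 */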
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	scomp_prepare_alg(alg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");