xref: /linux/crypto/scompress.c (revision 0923fd0419a1a2c8846e15deacac11b619e996d9)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cpumask.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>

#include "compress.h"

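/*
 * Per-CPU scratch page used to linearize scatterlist input that is not
 * virtually contiguous. The union gives the buffer both a pointer view
 * (@src) and an unsigned-long view (@saddr, for free_page()); both
 * aliases are guarded by @lock.
 */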
struct scomp_scratch {
	spinlock_t	lock;
	union {
		void	*src __guarded_by(&lock);
		unsigned long saddr __guarded_by(&lock);
	};
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static DEFINE_MUTEX(scomp_lock);
static int scomp_scratch_users __guarded_by(&scomp_lock);

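/*
 * CPUs whose scratch page still needs to be allocated. The work item
 * services the mask from process context, where GFP_KERNEL allocation
 * is possible.
 */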
static cpumask_t scomp_scratch_want;
static void scomp_scratch_workfn(struct work_struct *work);
static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void __maybe_unused crypto_scomp_show(struct seq_file *m,
					     struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

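/* Release every per-CPU scratch page once the last scratch user is gone. */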
static void crypto_scomp_free_scratches(void)
	__context_unsafe(/* frees @scratch */)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		free_page(scratch->saddr);
		scratch->src = NULL;
	}
}

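/* Allocate one scratch page for @cpu, preferring memory on its NUMA node. */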
static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *page;

	page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!page)
		return -ENOMEM;
	spin_lock_bh(&scratch->lock);
	scratch->src = page_address(page);
	spin_unlock_bh(&scratch->lock);
	return 0;
}

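/*
 * Deferred allocation of the scratch pages that were requested from
 * atomic context in _scomp_lock_scratch(). An allocation failure stops
 * the walk and leaves the remaining CPUs flagged for the next attempt.
 */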
static void scomp_scratch_workfn(struct work_struct *work)
{
	int cpu;

	for_each_cpu(cpu, &scomp_scratch_want) {
		struct scomp_scratch *scratch;

		scratch = per_cpu_ptr(&scomp_scratch, cpu);
		if (context_unsafe(scratch->src))
			continue;
		if (scomp_alloc_scratch(scratch, cpu))
			break;

		cpumask_clear_cpu(cpu, &scomp_scratch_want);
	}
}

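/*
 * Eagerly allocate only the first possible CPU's scratch page; it also
 * serves as the fallback for any CPU whose own page does not exist yet.
 */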
static int crypto_scomp_alloc_scratches(void)
	__context_unsafe(/* allocates @scratch */)
{
	unsigned int i = cpumask_first(cpu_possible_mask);
	struct scomp_scratch *scratch;

	scratch = per_cpu_ptr(&scomp_scratch, i);
	return scomp_alloc_scratch(scratch, i);
}

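/*
 * Set up the per-algorithm stream contexts and, for the first user,
 * the shared scratch page. The user count is rolled back on failure.
 */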
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	ret = crypto_acomp_alloc_streams(&alg->streams);
	if (ret)
		goto unlock;
	if (!scomp_scratch_users++) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			scomp_scratch_users--;
	}
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

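/*
 * Lock and return a scratch page for the current CPU. If this CPU has
 * no page yet, flag it for allocation by the workqueue and fall back to
 * the first possible CPU's page, which is guaranteed to exist while any
 * scratch user is registered. The wrapper macro tells the capability
 * analysis that the returned scratch's lock is held.
 */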
#define scomp_lock_scratch(...) __acquire_ret(_scomp_lock_scratch(__VA_ARGS__), &__ret->lock)
static struct scomp_scratch *_scomp_lock_scratch(void) __acquires_ret
{
	int cpu = raw_smp_processor_id();
	struct scomp_scratch *scratch;

	scratch = per_cpu_ptr(&scomp_scratch, cpu);
	spin_lock(&scratch->lock);
	if (likely(scratch->src))
		return scratch;
	spin_unlock(&scratch->lock);

	cpumask_set_cpu(cpu, &scomp_scratch_want);
	schedule_work(&scomp_scratch_work);

	scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
	spin_lock(&scratch->lock);
	return scratch;
}

static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
	__releases(&scratch->lock)
{
	spin_unlock(&scratch->lock);
}

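/*
 * Common body for compression and decompression (@dir != 0 means
 * compress). Source and destination must each be virtually contiguous:
 * either caller-supplied virtual addresses or scatterlist entries that
 * can be mapped in one piece. A source that cannot be mapped directly
 * is linearized through the locked scratch page; a destination that
 * cannot is rejected with -ENOSYS.
 */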
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	bool src_isvirt = acomp_request_src_isvirt(req);
	bool dst_isvirt = acomp_request_dst_isvirt(req);
	struct crypto_scomp *scomp = *tfm_ctx;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int n;
	const u8 *src;
	size_t soff;
	size_t doff;
	u8 *dst;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

	/*
	 * Obtain a contiguous destination pointer. Highmem buffers that
	 * span more than one page cannot be mapped in one go and are
	 * rejected with -ENOSYS.
	 */
	if (dst_isvirt)
		dst = req->dvirt;
	else {
		if (dlen <= req->dst->length) {
			dpage = sg_page(req->dst);
			doff = req->dst->offset;
		} else
			return -ENOSYS;

		dpage += doff / PAGE_SIZE;
		doff = offset_in_page(doff);

		n = (dlen - 1) / PAGE_SIZE;
		n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
		if (PageHighMem(dpage + n) &&
		    size_add(doff, dlen) > PAGE_SIZE)
			return -ENOSYS;
		dst = kmap_local_page(dpage) + doff;
	}

	/*
	 * Try the same for the source; on failure @src stays NULL and
	 * the data is copied through the scratch page below.
	 */
	if (src_isvirt)
		src = req->svirt;
	else {
		src = NULL;
		do {
			if (slen <= req->src->length) {
				spage = sg_page(req->src);
				soff = req->src->offset;
			} else
				break;

			spage = spage + soff / PAGE_SIZE;
			soff = offset_in_page(soff);

			n = (slen - 1) / PAGE_SIZE;
			n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
			if (PageHighMem(spage + n) &&
			    size_add(soff, slen) > PAGE_SIZE)
				break;
			src = kmap_local_page(spage) + soff;
		} while (0);
	}

	struct crypto_acomp_stream *stream =
		crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);

	if (!src_isvirt && !src) {
		struct scomp_scratch *scratch = scomp_lock_scratch();
		const u8 *src = scratch->src;

		/* Linearize the scatterlist source into the scratch page. */
		memcpy_from_sglist(scratch->src, req->src, 0, slen);

		if (dir)
			ret = crypto_scomp_compress(scomp, src, slen,
						    dst, &dlen, stream->ctx);
		else
			ret = crypto_scomp_decompress(scomp, src, slen,
						      dst, &dlen, stream->ctx);

		scomp_unlock_scratch(scratch);
	} else if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	crypto_acomp_unlock_stream_bh(stream);

	req->dlen = dlen;

	if (!src_isvirt && src)
		kunmap_local(src);
	if (!dst_isvirt) {
		kunmap_local(dst);
		dlen += doff;
		/* Flush every destination page that may now hold output. */
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage++;
		}
	}

	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

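/*
 * Tear down the scomp transform backing an acomp handle and release the
 * scratch pages once the last user is gone. The work item is flushed
 * first so a pending allocation cannot race with the teardown.
 */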
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	flush_work(&scomp_scratch_work);
	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

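/*
 * Wrap a synchronous scomp algorithm so it can be driven through the
 * asynchronous acomp API: allocate an inner scomp transform and point
 * the acomp entry points at the synchronous implementations.
 */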
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	struct scomp_alg *scomp = __crypto_scomp_alg(alg);

	crypto_acomp_free_streams(&scomp->streams);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
	.algsize = offsetof(struct scomp_alg, base),
};

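/*
 * Apply the common compression-algorithm defaults and advertise that
 * transforms backed by this algorithm accept virtual source and
 * destination addresses directly.
 */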
static void scomp_prepare_alg(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
}

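/**
 * crypto_register_scomp() - register a synchronous compression algorithm
 * @alg: algorithm definition
 *
 * A driver typically fills in a static scomp_alg and registers it from
 * its module init. A minimal sketch (the "foo" names are illustrative,
 * not an existing driver):
 *
 *	static struct scomp_alg foo_alg = {
 *		.compress	= foo_compress,
 *		.decompress	= foo_decompress,
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 * Return: zero on success; error code otherwise
 */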
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	scomp_prepare_alg(alg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret) {
			crypto_unregister_scomps(algs, i);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");