// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

#define SCOMP_SCRATCH_SIZE	65400

struct scomp_scratch {
	spinlock_t	lock;
	union {
		void *src;
		unsigned long saddr;
	};
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		free_page(scratch->saddr);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		struct page *page;
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
		if (!page)
			goto error;
		scratch->src = page_address(page);
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

static void scomp_free_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream = alg->stream;
	int i;

	/* Nothing to do if stream allocation never got anywhere. */
	alg->stream = NULL;
	if (!stream)
		return;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		if (!ps->ctx)
			break;

		alg->free_ctx(ps->ctx);
	}

	free_percpu(stream);
}

static int scomp_alloc_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream;
	int i;

	stream = alloc_percpu(struct crypto_acomp_stream);
	if (!stream)
		return -ENOMEM;

	/*
	 * Publish the array before populating it so that a failure
	 * partway through hands the partially initialised streams
	 * back to scomp_free_streams() instead of leaking them.
	 */
	alg->stream = stream;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		ps->ctx = alg->alloc_ctx();
		if (IS_ERR(ps->ctx)) {
			scomp_free_streams(alg);
			return PTR_ERR(ps->ctx);
		}

		spin_lock_init(&ps->lock);
	}

	return 0;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!alg->stream) {
		ret = scomp_alloc_streams(alg);
		if (ret)
			goto unlock;
	}
	if (!scomp_scratch_users++)
		ret = crypto_scomp_alloc_scratches();
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}
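
/*
 * Run a synchronous (de)compression for an acomp request.
 *
 * The source and destination are used in place when the caller passed
 * virtual addresses, or when the scatterlist entry is long enough and
 * safe to map with kmap_local_page(); otherwise the data is bounced
 * through this CPU's scratch buffers, with a scratch destination
 * capped at SCOMP_SCRATCH_SIZE bytes.
 */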
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int soff, doff;
	unsigned int n;
	const u8 *src;
	u8 *dst;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

	if (acomp_request_src_isvirt(req))
		src = req->svirt;
	else {
		soff = req->src->offset;
		spage = nth_page(sg_page(req->src), soff / PAGE_SIZE);
		soff = offset_in_page(soff);

		/* Index of the last page touched, for the highmem check. */
		n = (slen - 1) / PAGE_SIZE;
		n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
		if (slen <= req->src->length &&
		    (!PageHighMem(nth_page(spage, n)) ||
		     size_add(soff, slen) <= PAGE_SIZE))
			src = kmap_local_page(spage) + soff;
		else
			src = scratch->src;
	}

	if (acomp_request_dst_isvirt(req))
		dst = req->dvirt;
	else {
		doff = req->dst->offset;
		dpage = nth_page(sg_page(req->dst), doff / PAGE_SIZE);
		doff = offset_in_page(doff);

		/* Likewise for the destination's last page. */
		n = (dlen - 1) / PAGE_SIZE;
		n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
		if (dlen <= req->dst->length &&
		    (!PageHighMem(nth_page(dpage, n)) ||
		     size_add(doff, dlen) <= PAGE_SIZE))
			dst = kmap_local_page(dpage) + doff;
		else {
			if (dlen > SCOMP_SCRATCH_SIZE)
				dlen = SCOMP_SCRATCH_SIZE;
			dst = scratch->dst;
		}
	}

	spin_lock_bh(&scratch->lock);

	if (src == scratch->src)
		memcpy_from_sglist(scratch->src, req->src, 0, slen);

	stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
	spin_lock(&stream->lock);
	if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	if (dst == scratch->dst)
		memcpy_to_sglist(req->dst, 0, dst, dlen);

	spin_unlock(&stream->lock);
	spin_unlock_bh(&scratch->lock);

	req->dlen = dlen;

	if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) {
		kunmap_local(dst);
		dlen += doff;
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage = nth_page(dpage, 1);
		}
	}
	if (!acomp_request_src_isvirt(req) && src != scratch->src)
		kunmap_local(src);

	return ret;
}

static int scomp_acomp_chain(struct acomp_req *req, int dir)
{
	struct acomp_req *r2;
	int err;

	err = scomp_acomp_comp_decomp(req, dir);
	req->base.err = err;

	list_for_each_entry(r2, &req->base.list, base.list)
		r2->base.err = scomp_acomp_comp_decomp(r2, dir);

	return err;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 0);
}
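
/*
 * Glue for exposing an scomp algorithm through the acomp interface:
 * the acomp tfm context stores a crypto_scomp handle, the acomp
 * compress/decompress callbacks are routed to the synchronous
 * implementation above, and the shared scratch buffers are freed
 * once the last scomp user goes away.
 */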
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	scomp_free_streams(__crypto_scomp_alg(alg));
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

static void scomp_prepare_alg(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
}

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	scomp_prepare_alg(alg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
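
/*
 * Illustrative use from an algorithm driver (a minimal sketch only;
 * "mycomp" and its helpers are hypothetical and not part of this file):
 *
 *	static struct scomp_alg mycomp_alg = {
 *		.alloc_ctx	= mycomp_alloc_ctx,
 *		.free_ctx	= mycomp_free_ctx,
 *		.compress	= mycomp_compress,
 *		.decompress	= mycomp_decompress,
 *		.base		= {
 *			.cra_name	 = "mycomp",
 *			.cra_driver_name = "mycomp-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 * The driver registers with crypto_register_scomp(&mycomp_alg) from its
 * module init function and calls crypto_unregister_scomp(&mycomp_alg)
 * on exit; the algorithm is then reachable through the acomp API via
 * crypto_alloc_acomp("mycomp", 0, 0).
 */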