// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

#define SCOMP_SCRATCH_SIZE	65400

/*
 * Per-CPU bounce buffers: one page for linearizing the source and a
 * vmalloc area of SCOMP_SCRATCH_SIZE bytes for the destination.
 */
struct scomp_scratch {
	spinlock_t	lock;
	union {
		void		*src;
		unsigned long	saddr;
	};
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		free_page(scratch->saddr);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		struct page *page;
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
		if (!page)
			goto error;
		scratch->src = page_address(page);
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

static void scomp_free_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream = alg->stream;
	int i;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		/* Contexts are allocated in CPU order; stop at the first hole. */
		if (!ps->ctx)
			break;

		alg->free_ctx(ps->ctx);
	}

	free_percpu(stream);
}

static int scomp_alloc_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream;
	int i;

	stream = alloc_percpu(struct crypto_acomp_stream);
	if (!stream)
		return -ENOMEM;

	/*
	 * Publish the streams before the loop so that scomp_free_streams()
	 * can unwind a partial allocation on failure.
	 */
	alg->stream = stream;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		ps->ctx = alg->alloc_ctx();
		if (IS_ERR(ps->ctx)) {
			scomp_free_streams(alg);
			return PTR_ERR(ps->ctx);
		}

		spin_lock_init(&ps->lock);
	}
	return 0;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!alg->stream) {
		ret = scomp_alloc_streams(alg);
		if (ret)
			goto unlock;
	}
	if (!scomp_scratch_users++) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			scomp_scratch_users--;
	}
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int soff, doff;
	void *src, *dst;
	unsigned int n;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

	/*
	 * Map the source in place if it lies in a single sg entry and the
	 * last page it touches is addressable; otherwise linearize it into
	 * the per-CPU scratch page.  n is the index of that last page.
	 */
	soff = req->src->offset;
	spage = nth_page(sg_page(req->src), soff / PAGE_SIZE);
	soff = offset_in_page(soff);

	n = slen / PAGE_SIZE;
	n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
	if (slen <= req->src->length && (!PageHighMem(nth_page(spage, n)) ||
					 size_add(soff, slen) <= PAGE_SIZE))
		src = kmap_local_page(spage) + soff;
	else
		src = scratch->src;

	/* Apply the same test to the destination. */
	doff = req->dst->offset;
	dpage = nth_page(sg_page(req->dst), doff / PAGE_SIZE);
	doff = offset_in_page(doff);

	n = dlen / PAGE_SIZE;
	n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
	if (dlen <= req->dst->length && (!PageHighMem(nth_page(dpage, n)) ||
					 size_add(doff, dlen) <= PAGE_SIZE)) {
		dst = kmap_local_page(dpage) + doff;
	} else {
		/* Bounced output is limited by the scratch buffer size. */
		if (dlen > SCOMP_SCRATCH_SIZE)
			dlen = SCOMP_SCRATCH_SIZE;
		dst = scratch->dst;
	}

	/* Disable BHs and serialize use of this CPU's scratch buffers. */
	spin_lock_bh(&scratch->lock);

	if (src == scratch->src)
		memcpy_from_sglist(src, req->src, 0, slen);

	stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
	spin_lock(&stream->lock);
	if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	if (dst == scratch->dst)
		memcpy_to_sglist(req->dst, 0, dst, dlen);

	spin_unlock(&stream->lock);
	spin_unlock_bh(&scratch->lock);

	req->dlen = dlen;

	if (dst != scratch->dst) {
		kunmap_local(dst);
		/* Flush every destination page written via the mapping. */
		dlen += doff;
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage = nth_page(dpage, 1);
		}
	}
	if (src != scratch->src)
		kunmap_local(src);

	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

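/*
 * Example (an illustrative sketch, not part of this file): the caller-side
 * flow that lands in scomp_acomp_compress() above, driven through the
 * generic acomp API. The buffer names (src_buf/dst_buf and their lengths)
 * and the choice of "deflate" are assumptions for the example; any
 * scomp-backed algorithm behaves the same. Because scomp completes
 * synchronously, no completion callback is needed.
 *
 *	struct scatterlist sg_src, sg_dst;
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = acomp_request_alloc(tfm);
 *	if (!req)
 *		goto out_free_tfm;
 *
 *	sg_init_one(&sg_src, src_buf, src_len);
 *	sg_init_one(&sg_dst, dst_buf, dst_len);
 *	acomp_request_set_callback(req, 0, NULL, NULL);
 *	acomp_request_set_params(req, &sg_src, &sg_dst, src_len, dst_len);
 *
 *	err = crypto_acomp_compress(req);
 *	(on success, req->dlen holds the number of bytes produced)
 *
 *	acomp_request_free(req);
 * out_free_tfm:
 *	crypto_free_acomp(tfm);
 */
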
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	scomp_free_streams(__crypto_scomp_alg(alg));
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
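
/*
 * Registration sketch (illustrative; the my_* names and MY_WORKSPACE_SIZE
 * are hypothetical): a driver supplies per-stream context hooks plus
 * linear-buffer compress/decompress callbacks and registers them with
 * crypto_register_scomp(). The core then exposes the algorithm through
 * the acomp interface, using the per-CPU streams and scratch buffers
 * managed above.
 *
 *	static void *my_alloc_ctx(void)
 *	{
 *		void *ctx = kzalloc(MY_WORKSPACE_SIZE, GFP_KERNEL);
 *
 *		return ctx ? ctx : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static void my_free_ctx(void *ctx)
 *	{
 *		kfree(ctx);
 *	}
 *
 *	static int my_compress(struct crypto_scomp *tfm, const u8 *src,
 *			       unsigned int slen, u8 *dst,
 *			       unsigned int *dlen, void *ctx)
 *	{
 *		compress slen bytes from src into dst using the ctx
 *		workspace, then store the produced size in *dlen
 *	}
 *
 *	(my_decompress has the same signature, in the other direction)
 *
 *	static struct scomp_alg my_alg = {
 *		.alloc_ctx	= my_alloc_ctx,
 *		.free_ctx	= my_free_ctx,
 *		.compress	= my_compress,
 *		.decompress	= my_decompress,
 *		.base		= {
 *			.cra_name	 = "my-alg",
 *			.cra_driver_name = "my-alg-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 * The module init hook then calls crypto_register_scomp(&my_alg) and the
 * exit hook calls crypto_unregister_scomp(&my_alg).
 */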