// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cpumask.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>

#include "compress.h"

/*
 * Per-CPU scratch page used to linearise the source data when the
 * scatterlist cannot be mapped directly.
 */
struct scomp_scratch {
	spinlock_t lock;
	union {
		void *src;
		unsigned long saddr;
	};
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static cpumask_t scomp_scratch_want;
static void scomp_scratch_workfn(struct work_struct *work);
static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		free_page(scratch->saddr);
		scratch->src = NULL;
	}
}

static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *page;

	page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!page)
		return -ENOMEM;
	spin_lock_bh(&scratch->lock);
	scratch->src = page_address(page);
	spin_unlock_bh(&scratch->lock);
	return 0;
}

/* Allocate the scratch pages that were requested but missing at use time. */
static void scomp_scratch_workfn(struct work_struct *work)
{
	int cpu;

	for_each_cpu(cpu, &scomp_scratch_want) {
		struct scomp_scratch *scratch;

		scratch = per_cpu_ptr(&scomp_scratch, cpu);
		if (scratch->src)
			continue;
		if (scomp_alloc_scratch(scratch, cpu))
			break;

		cpumask_clear_cpu(cpu, &scomp_scratch_want);
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	unsigned int i = cpumask_first(cpu_possible_mask);
	struct scomp_scratch *scratch;

	scratch = per_cpu_ptr(&scomp_scratch, i);
	return scomp_alloc_scratch(scratch, i);
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	ret = crypto_acomp_alloc_streams(&alg->streams);
	if (ret)
		goto unlock;
	if (!scomp_scratch_users) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			goto unlock;
	}
	/* Every tfm holds a reference on the shared scratch pages. */
	scomp_scratch_users++;
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

/*
 * Return a locked scratch buffer.  Prefer the local CPU's page; if it has
 * not been allocated yet, ask the workqueue to allocate it and fall back
 * to the scratch of the first possible CPU, which is set up at init time.
 */
static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
{
	int cpu = raw_smp_processor_id();
	struct scomp_scratch *scratch;

	scratch = per_cpu_ptr(&scomp_scratch, cpu);
	spin_lock(&scratch->lock);
	if (likely(scratch->src))
		return scratch;
	spin_unlock(&scratch->lock);

	cpumask_set_cpu(cpu, &scomp_scratch_want);
	schedule_work(&scomp_scratch_work);

	scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
	spin_lock(&scratch->lock);
	return scratch;
}

static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
	__releases(scratch)
{
	spin_unlock(&scratch->lock);
}

static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	bool src_isvirt = acomp_request_src_isvirt(req);
	bool dst_isvirt = acomp_request_dst_isvirt(req);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	struct scomp_scratch *scratch;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int n;
	const u8 *src;
	size_t soff;
	size_t doff;
	u8 *dst;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

	if (dst_isvirt)
		dst = req->dvirt;
	else {
		if (dlen <= req->dst->length) {
			dpage = sg_page(req->dst);
			doff = req->dst->offset;
		} else
			return -ENOSYS;

		dpage = nth_page(dpage, doff / PAGE_SIZE);
		doff = offset_in_page(doff);

		/* Index of the last page touched by the dlen-byte output. */
		n = (dlen - 1) / PAGE_SIZE;
		n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
		if (PageHighMem(nth_page(dpage, n)) &&
		    size_add(doff, dlen) > PAGE_SIZE)
			return -ENOSYS;
		dst = kmap_local_page(dpage) + doff;
	}

	if (src_isvirt)
		src = req->svirt;
	else {
		src = NULL;
		do {
			if (slen <= req->src->length) {
				spage = sg_page(req->src);
				soff = req->src->offset;
			} else
				break;

			spage = nth_page(spage, soff / PAGE_SIZE);
			soff = offset_in_page(soff);

			/* Index of the last page covered by the slen-byte input. */
			n = (slen - 1) / PAGE_SIZE;
			n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
			if (PageHighMem(nth_page(spage, n)) &&
			    size_add(soff, slen) > PAGE_SIZE)
				break;
			src = kmap_local_page(spage) + soff;
		} while (0);
	}

	stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);

	if (!src_isvirt && !src) {
		const u8 *src;

		scratch = scomp_lock_scratch();
		src = scratch->src;
		memcpy_from_sglist(scratch->src, req->src, 0, slen);

		if (dir)
			ret = crypto_scomp_compress(scomp, src, slen,
						    dst, &dlen, stream->ctx);
		else
			ret = crypto_scomp_decompress(scomp, src, slen,
						      dst, &dlen, stream->ctx);

		scomp_unlock_scratch(scratch);
	} else if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	crypto_acomp_unlock_stream_bh(stream);

	req->dlen = dlen;

	if (!src_isvirt && src)
		kunmap_local(src);
	if (!dst_isvirt) {
		kunmap_local(dst);
		dlen += doff;
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage = nth_page(dpage, 1);
		}
	}

	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	flush_work(&scomp_scratch_work);
	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	struct scomp_alg *scomp = __crypto_scomp_alg(alg);

	crypto_acomp_free_streams(&scomp->streams);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

static void scomp_prepare_alg(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
}

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	scomp_prepare_alg(alg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");