// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cpumask.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>

#include "compress.h"

/*
 * Per-CPU scratch buffer used to linearize scatterlist source data that
 * cannot be mapped in place (see scomp_acomp_comp_decomp()).  The page
 * behind ->src is allocated lazily; ->src == NULL means "not allocated
 * yet".
 */
struct scomp_scratch {
	spinlock_t lock;		/* serializes use of this CPU's scratch page */
	union {
		void *src;		/* kernel address of the scratch page */
		unsigned long saddr;	/* same address, in free_page() form */
	};
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
/* Number of live scomp tfms; modified only under scomp_lock. */
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

/* CPUs whose scratch page still needs allocating by the deferred worker. */
static cpumask_t scomp_scratch_want;
static void scomp_scratch_workfn(struct work_struct *work);
static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);

/* Report the "scomp" algorithm type over the crypto_user netlink socket. */
static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

/* Show the algorithm type line in /proc/crypto. */
static void __maybe_unused crypto_scomp_show(struct seq_file *m,
					     struct crypto_alg *alg)
{
	seq_puts(m, "type : scomp\n");
}

/*
 * Free every possible CPU's scratch page and reset the pointers.  Only
 * called from the tfm exit path with scomp_lock held and the allocation
 * work flushed, so nothing can race with the worker here.
 */
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		/* free_page(0) is a no-op, so never-allocated CPUs are fine. */
		free_page(scratch->saddr);
		scratch->src = NULL;
	}
}
/*
 * Allocate the scratch page for @cpu on its home NUMA node and publish
 * it under the scratch lock so concurrent scomp_lock_scratch() callers
 * see either NULL or a fully valid page.
 */
static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *page;

	page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!page)
		return -ENOMEM;
	spin_lock_bh(&scratch->lock);
	scratch->src = page_address(page);
	spin_unlock_bh(&scratch->lock);
	return 0;
}

/*
 * Deferred allocator: give each CPU flagged in scomp_scratch_want its
 * own scratch page.  On allocation failure we stop early; the CPU stays
 * flagged, so allocation is retried the next time the work is scheduled.
 */
static void scomp_scratch_workfn(struct work_struct *work)
{
	int cpu;

	for_each_cpu(cpu, &scomp_scratch_want) {
		struct scomp_scratch *scratch;

		scratch = per_cpu_ptr(&scomp_scratch, cpu);
		if (scratch->src)
			continue;
		if (scomp_alloc_scratch(scratch, cpu))
			break;

		cpumask_clear_cpu(cpu, &scomp_scratch_want);
	}
}

/*
 * Eagerly allocate a scratch page for the first possible CPU only; this
 * guarantees scomp_lock_scratch() always has a fallback.  The remaining
 * CPUs get their pages on demand via scomp_scratch_workfn().
 */
static int crypto_scomp_alloc_scratches(void)
{
	unsigned int i = cpumask_first(cpu_possible_mask);
	struct scomp_scratch *scratch;

	scratch = per_cpu_ptr(&scomp_scratch, i);
	return scomp_alloc_scratch(scratch, i);
}

/*
 * tfm init: set up the per-algorithm stream state and, for the first
 * scomp user, the scratch buffers.  The user count is rolled back if the
 * scratch allocation fails.
 */
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	ret = crypto_acomp_alloc_streams(&alg->streams);
	if (ret)
		goto unlock;
	if (!scomp_scratch_users++) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			scomp_scratch_users--;
	}
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

/*
 * Lock and return a scratch buffer.  Prefer the local CPU's page; if it
 * has not been allocated yet, flag the CPU for the deferred allocator
 * and fall back to the first possible CPU's scratch, which is allocated
 * in crypto_scomp_init_tfm() and therefore always present.
 */
static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
{
	int cpu = raw_smp_processor_id();
	struct scomp_scratch *scratch;

	scratch = per_cpu_ptr(&scomp_scratch, cpu);
	spin_lock(&scratch->lock);
	if (likely(scratch->src))
		return scratch;
	spin_unlock(&scratch->lock);

	cpumask_set_cpu(cpu, &scomp_scratch_want);
	schedule_work(&scomp_scratch_work);

	/* Fallback: the first possible CPU's scratch always exists. */
	scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
	spin_lock(&scratch->lock);
	return scratch;
}

static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
	__releases(scratch)
{
	spin_unlock(&scratch->lock);
}

/*
 * Common backend for acomp compression (@dir == 1) and decompression
 * (@dir == 0) on top of a synchronous scomp algorithm.
 *
 * Source and destination may each be supplied as a kernel-virtual
 * buffer (the *_isvirt request flags) or as a scatterlist.  Scatterlist
 * buffers are used in place when they fit in the first sg entry and can
 * be mapped linearly; a source that cannot be mapped is copied into the
 * per-CPU scratch page instead, while an unusable destination fails
 * with -ENOSYS.
 */
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	bool src_isvirt = acomp_request_src_isvirt(req);
	bool dst_isvirt = acomp_request_dst_isvirt(req);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	struct scomp_scratch *scratch;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int n;
	const u8 *src;
	size_t soff;
	size_t doff;
	u8 *dst;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

	if (dst_isvirt)
		dst = req->dvirt;
	else {
		/* The whole output must lie within the first sg entry. */
		if (dlen <= req->dst->length) {
			dpage = sg_page(req->dst);
			doff = req->dst->offset;
		} else
			return -ENOSYS;

		dpage += doff / PAGE_SIZE;
		doff = offset_in_page(doff);

		/* n = page index of the last output byte, (doff + dlen - 1) / PAGE_SIZE. */
		n = (dlen - 1) / PAGE_SIZE;
		n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
		/*
		 * kmap_local_page() maps a single page, so a multi-page
		 * highmem destination cannot be addressed linearly.
		 */
		if (PageHighMem(dpage + n) &&
		    size_add(doff, dlen) > PAGE_SIZE)
			return -ENOSYS;
		dst = kmap_local_page(dpage) + doff;
	}

	if (src_isvirt)
		src = req->svirt;
	else {
		/* src stays NULL when the sg source cannot be mapped in place. */
		src = NULL;
		do {
			if (slen <= req->src->length) {
				spage = sg_page(req->src);
				soff = req->src->offset;
			} else
				break;

			spage = spage + soff / PAGE_SIZE;
			soff = offset_in_page(soff);

			/* Same last-page computation as for the destination above. */
			n = (slen - 1) / PAGE_SIZE;
			n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
			if (PageHighMem(spage + n) &&
			    size_add(soff, slen) > PAGE_SIZE)
				break;
			src = kmap_local_page(spage) + soff;
		} while (0);
	}

	stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);

	if (!src_isvirt && !src) {
		const u8 *src;

		/*
		 * Unmappable sg source: linearize it into the scratch page.
		 * NOTE(review): the scratch is a single page; this relies on
		 * slen never exceeding PAGE_SIZE here — confirm the limit is
		 * enforced at the acomp layer.
		 */
		scratch = scomp_lock_scratch();
		src = scratch->src;
		memcpy_from_sglist(scratch->src, req->src, 0, slen);

		if (dir)
			ret = crypto_scomp_compress(scomp, src, slen,
						    dst, &dlen, stream->ctx);
		else
			ret = crypto_scomp_decompress(scomp, src, slen,
						      dst, &dlen, stream->ctx);

		scomp_unlock_scratch(scratch);
	} else if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	crypto_acomp_unlock_stream_bh(stream);

	req->dlen = dlen;

	if (!src_isvirt && src)
		kunmap_local(src);
	if (!dst_isvirt) {
		kunmap_local(dst);
		/* Flush the dcache for every page the output touched. */
		dlen += doff;
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage++;
		}
	}

	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

/*
 * acomp tfm teardown for the scomp-backed case: drop the wrapped scomp
 * tfm and, for the last user, free the scratch pages.  The work is
 * flushed first so the deferred allocator cannot run concurrently with
 * (or after) crypto_scomp_free_scratches().
 */
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	flush_work(&scomp_scratch_work);
	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

/*
 * Set up an acomp handle backed by a synchronous scomp algorithm: create
 * the scomp tfm, stash it in the acomp context and wire up the
 * compress/decompress entry points.
 */
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

/* Algorithm destructor: release the per-algorithm stream state. */
static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	struct scomp_alg *scomp = __crypto_scomp_alg(alg);

	crypto_acomp_free_streams(&scomp->streams);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
	.algsize = offsetof(struct scomp_alg, base),
};

/*
 * Apply common compression-alg preparation and advertise that this
 * algorithm accepts kernel-virtual request buffers.
 */
static void scomp_prepare_alg(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
}

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	scomp_prepare_alg(alg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

/* Register @count algorithms; on failure, unwind the ones already registered. */
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret) {
			crypto_unregister_scomps(algs, i);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

/* Unregister in reverse order of registration. */
void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");