// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>

#include "compress.h"

struct crypto_scomp;

enum {
	ACOMP_WALK_SLEEP = 1 << 0,
	ACOMP_WALK_SRC_LINEAR = 1 << 1,
	ACOMP_WALK_SRC_FOLIO = 1 << 2,
	ACOMP_WALK_DST_LINEAR = 1 << 3,
	ACOMP_WALK_DST_FOLIO = 1 << 4,
};

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}

static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (alg->exit)
		alg->exit(acomp);

	if (acomp_is_async(acomp))
		crypto_free_acomp(acomp->fb);
}

static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);
	struct crypto_acomp *fb = NULL;
	int err;

	acomp->fb = acomp;

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	if (acomp_is_async(acomp)) {
		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(fb))
			return PTR_ERR(fb);

		err = -EINVAL;
		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
			goto out_free_fb;

		acomp->fb = fb;
	}

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->reqsize = alg->reqsize;

	acomp->base.exit = crypto_acomp_exit_tfm;

	if (!alg->init)
		return 0;

	err = alg->init(acomp);
	if (err)
		goto out_free_fb;

	return 0;

out_free_fb:
	crypto_free_acomp(fb);
	return err;
}

static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}
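
/*
 * Illustrative usage sketch (not part of this file's logic): how a caller
 * might allocate an acomp transform and run a one-shot compression through
 * the API implemented below.  The algorithm name "lzo", the src_sg/dst_sg
 * scatterlists, the src_len/dst_len values and the synchronous
 * crypto_wait_req() pattern are assumptions made only for this example.
 *
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("lzo", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = acomp_request_alloc(tfm);
 *	if (!req) {
 *		crypto_free_acomp(tfm);
 *		return -ENOMEM;
 *	}
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					CRYPTO_TFM_REQ_MAY_SLEEP,
 *				   crypto_req_done, &wait);
 *	acomp_request_set_params(req, src_sg, dst_sg, src_len, dst_len);
 *
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */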

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
};

struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					     u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				     node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);

static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct acomp_req_chain *state = &req->chain;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;
}

static void acomp_restore_req(struct acomp_req *req)
{
	struct acomp_req_chain *state = req->base.data;

	req->base.complete = state->compl;
	req->base.data = state->data;
}

static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
{
	struct acomp_req *req = state->cur;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	req->base.err = err;
	state = &req->chain;

	if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
		acomp_request_set_src_dma(req, state->src, slen);
	else if (state->flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)
		acomp_request_set_src_folio(req, state->sfolio, state->soff, slen);
	if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
		acomp_request_set_dst_dma(req, state->dst, dlen);
	else if (state->flags & CRYPTO_ACOMP_REQ_DST_FOLIO)
		acomp_request_set_dst_folio(req, state->dfolio, state->doff, dlen);
}

static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
					  CRYPTO_ACOMP_REQ_DST_VIRT |
					  CRYPTO_ACOMP_REQ_SRC_FOLIO |
					  CRYPTO_ACOMP_REQ_DST_FOLIO);

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	} else if (acomp_request_src_isfolio(req)) {
		struct folio *folio = req->sfolio;
		unsigned int slen = req->slen;
		size_t off = req->soff;

		state->sfolio = folio;
		state->soff = off;
		sg_init_table(&state->ssg, 1);
		sg_set_page(&state->ssg, folio_page(folio, off / PAGE_SIZE),
			    slen, off % PAGE_SIZE);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	} else if (acomp_request_dst_isfolio(req)) {
		struct folio *folio = req->dfolio;
		unsigned int dlen = req->dlen;
		size_t off = req->doff;

		state->dfolio = folio;
		state->doff = off;
		sg_init_table(&state->dsg, 1);
		sg_set_page(&state->dsg, folio_page(folio, off / PAGE_SIZE),
			    dlen, off % PAGE_SIZE);
		/* The destination scatterlist must be set as dst, not src. */
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}

static int acomp_do_nondma(struct acomp_req_chain *state,
			   struct acomp_req *req)
{
	u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT |
		   CRYPTO_ACOMP_REQ_SRC_NONDMA |
		   CRYPTO_ACOMP_REQ_DST_VIRT |
		   CRYPTO_ACOMP_REQ_DST_NONDMA;
	ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
	int err;

	acomp_request_set_callback(fbreq, req->base.flags, NULL, NULL);
	fbreq->base.flags &= ~keep;
	fbreq->base.flags |= req->base.flags & keep;
	fbreq->src = req->src;
	fbreq->dst = req->dst;
	fbreq->slen = req->slen;
	fbreq->dlen = req->dlen;

	if (state->op == crypto_acomp_reqtfm(req)->compress)
		err = crypto_acomp_compress(fbreq);
	else
		err = crypto_acomp_decompress(fbreq);

	req->dlen = fbreq->dlen;
	return err;
}

static int acomp_do_one_req(struct acomp_req_chain *state,
			    struct acomp_req *req)
{
	state->cur = req;

	if (acomp_request_isnondma(req))
		return acomp_do_nondma(state, req);

	acomp_virt_to_sg(req);
	return state->op(req);
}

static int acomp_reqchain_finish(struct acomp_req *req0, int err, u32 mask)
{
	struct acomp_req_chain *state = req0->base.data;
	struct acomp_req *req = state->cur;
	struct acomp_req *n;

	acomp_reqchain_virt(state, err);

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = acomp_reqchain_done;
		req->base.data = state;

		err = acomp_do_one_req(state, req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head))
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		acomp_reqchain_virt(state, err);
		list_add_tail(&req->base.list, &req0->base.list);
	}

	acomp_restore_req(req0);

out:
	return err;
}

static void acomp_reqchain_done(void *data, int err)
{
	struct acomp_req_chain *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head))
			return;
		goto notify;
	}

	err = acomp_reqchain_finish(state->req0, err,
				    CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}

static int acomp_do_req_chain(struct acomp_req *req,
			      int (*op)(struct acomp_req *req))
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state;
	int err;

	if (crypto_acomp_req_chain(tfm) ||
	    (!acomp_request_chained(req) && acomp_request_issg(req)))
		return op(req);

	acomp_save_req(req, acomp_reqchain_done);
	state = req->base.data;

	state->op = op;
	state->src = NULL;
	INIT_LIST_HEAD(&state->head);
	list_splice_init(&req->base.list, &state->head);

	err = acomp_do_one_req(state, req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return acomp_reqchain_finish(req, err, ~0);
}

int crypto_acomp_compress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

int crypto_acomp_decompress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);

void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

static void acomp_stream_workfn(struct work_struct *work)
{
	struct crypto_acomp_streams *s =
		container_of(work, struct crypto_acomp_streams, stream_work);
	struct crypto_acomp_stream __percpu *streams = s->streams;
	int cpu;

	for_each_cpu(cpu, &s->stream_want) {
		struct crypto_acomp_stream *ps;
		void *ctx;

		ps = per_cpu_ptr(streams, cpu);
		if (ps->ctx)
			continue;

		ctx = s->alloc_ctx();
		if (IS_ERR(ctx))
			break;

		spin_lock_bh(&ps->lock);
		ps->ctx = ctx;
		spin_unlock_bh(&ps->lock);

		cpumask_clear_cpu(cpu, &s->stream_want);
	}
}

void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams = s->streams;
	void (*free_ctx)(void *);
	int i;

	cancel_work_sync(&s->stream_work);
	free_ctx = s->free_ctx;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);

		if (!ps->ctx)
			continue;

		free_ctx(ps->ctx);
	}

	free_percpu(streams);
}
EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);

int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams;
	struct crypto_acomp_stream *ps;
	unsigned int i;
	void *ctx;

	if (s->streams)
		return 0;

	streams = alloc_percpu(struct crypto_acomp_stream);
	if (!streams)
		return -ENOMEM;

	ctx = s->alloc_ctx();
	if (IS_ERR(ctx)) {
		free_percpu(streams);
		return PTR_ERR(ctx);
	}

	i = cpumask_first(cpu_possible_mask);
	ps = per_cpu_ptr(streams, i);
	ps->ctx = ctx;

	for_each_possible_cpu(i) {
		ps = per_cpu_ptr(streams, i);
		spin_lock_init(&ps->lock);
	}

	s->streams = streams;

	INIT_WORK(&s->stream_work, acomp_stream_workfn);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);

struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
	struct crypto_acomp_streams *s) __acquires(stream)
{
	struct crypto_acomp_stream __percpu *streams = s->streams;
	int cpu = raw_smp_processor_id();
	struct crypto_acomp_stream *ps;

	ps = per_cpu_ptr(streams, cpu);
	spin_lock_bh(&ps->lock);
	if (likely(ps->ctx))
		return ps;
	spin_unlock(&ps->lock);

	cpumask_set_cpu(cpu, &s->stream_want);
	schedule_work(&s->stream_work);

	ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
	spin_lock(&ps->lock);
	return ps;
}
EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);

void acomp_walk_done_src(struct acomp_walk *walk, int used)
{
	walk->slen -= used;
	if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
		scatterwalk_advance(&walk->in, used);
	else
		scatterwalk_done_src(&walk->in, used);

	if ((walk->flags & ACOMP_WALK_SLEEP))
		cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_src);

void acomp_walk_done_dst(struct acomp_walk *walk, int used)
{
	walk->dlen -= used;
	if ((walk->flags & ACOMP_WALK_DST_LINEAR))
		scatterwalk_advance(&walk->out, used);
	else
		scatterwalk_done_dst(&walk->out, used);

	if ((walk->flags & ACOMP_WALK_SLEEP))
		cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_dst);

int acomp_walk_next_src(struct acomp_walk *walk)
{
	unsigned int slen = walk->slen;
	unsigned int max = UINT_MAX;

	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
		max = PAGE_SIZE;
	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
		walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
					   walk->in.offset);
		return min(slen, max);
	}

	return slen ? scatterwalk_next(&walk->in, slen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_src);

int acomp_walk_next_dst(struct acomp_walk *walk)
{
	unsigned int dlen = walk->dlen;
	unsigned int max = UINT_MAX;

	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
		max = PAGE_SIZE;
	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
		walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
					    walk->out.offset);
		return min(dlen, max);
	}

	return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_dst);

int acomp_walk_virt(struct acomp_walk *__restrict walk,
		    struct acomp_req *__restrict req)
{
	struct scatterlist *src = req->src;
	struct scatterlist *dst = req->dst;

	walk->slen = req->slen;
	walk->dlen = req->dlen;

	if (!walk->slen || !walk->dlen)
		return -EINVAL;

	walk->flags = 0;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
		walk->flags |= ACOMP_WALK_SLEEP;
	if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
		walk->flags |= ACOMP_WALK_SRC_LINEAR;
	else if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)) {
		src = &req->chain.ssg;
		sg_init_table(src, 1);
		sg_set_folio(src, req->sfolio, walk->slen, req->soff);
	}
	if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
		walk->flags |= ACOMP_WALK_DST_LINEAR;
	else if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO)) {
		dst = &req->chain.dsg;
		sg_init_table(dst, 1);
		sg_set_folio(dst, req->dfolio, walk->dlen, req->doff);
	}

	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
		walk->in.sg = (void *)req->svirt;
		walk->in.offset = 0;
	} else
		scatterwalk_start(&walk->in, src);
	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
		walk->out.sg = (void *)req->dvirt;
		walk->out.offset = 0;
	} else
		scatterwalk_start(&walk->out, dst);

	return 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_virt);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
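
/*
 * Illustrative registration sketch (an assumption, not code from this file):
 * a driver providing an acomp algorithm would typically fill a struct
 * acomp_alg and register it with crypto_register_acomp() above.  The names
 * "example", example_compress() and example_decompress(), and the priority
 * value are hypothetical placeholders.
 *
 *	static struct acomp_alg example_acomp = {
 *		.compress	= example_compress,
 *		.decompress	= example_decompress,
 *		.base		= {
 *			.cra_name	 = "example",
 *			.cra_driver_name = "example-generic",
 *			.cra_priority	 = 100,
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init example_mod_init(void)
 *	{
 *		return crypto_register_acomp(&example_acomp);
 *	}
 *
 *	static void __exit example_mod_exit(void)
 *	{
 *		crypto_unregister_acomp(&example_acomp);
 *	}
 */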