// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "compress.h"

struct crypto_scomp;

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}

static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (alg->exit)
		alg->exit(acomp);

	if (acomp_is_async(acomp))
		crypto_free_acomp(acomp->fb);
}

static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);
	struct crypto_acomp *fb = NULL;
	int err;

	acomp->fb = acomp;

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	if (acomp_is_async(acomp)) {
		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(fb))
			return PTR_ERR(fb);

		err = -EINVAL;
		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
			goto out_free_fb;

		acomp->fb = fb;
	}

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->reqsize = alg->reqsize;

	acomp->base.exit = crypto_acomp_exit_tfm;

	if (!alg->init)
		return 0;

	err = alg->init(acomp);
	if (err)
		goto out_free_fb;

	return 0;

out_free_fb:
	crypto_free_acomp(fb);
	return err;
}

static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
};

struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
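
/*
 * Illustrative usage sketch (hypothetical caller code, not compiled here):
 * a typical consumer allocates a transform with crypto_alloc_acomp(),
 * attaches a request and runs compression on scatterlists.  The algorithm
 * name "deflate", src_sg/dst_sg and the lengths below are placeholders.
 *
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	req = acomp_request_alloc(tfm);
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */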

static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state = &req->chain;

	if (!acomp_is_async(tfm))
		return;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;
}

static void acomp_restore_req(struct acomp_req_chain *state)
{
	struct acomp_req *req = state->req0;
	struct crypto_acomp *tfm;

	tfm = crypto_acomp_reqtfm(req);
	if (!acomp_is_async(tfm))
		return;

	req->base.complete = state->compl;
	req->base.data = state->data;
}

static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
{
	struct acomp_req *req = state->cur;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	req->base.err = err;
	state = &req->chain;

	if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
		acomp_request_set_src_dma(req, state->src, slen);
	else if (state->flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)
		acomp_request_set_src_folio(req, state->sfolio, state->soff, slen);
	if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
		acomp_request_set_dst_dma(req, state->dst, dlen);
	else if (state->flags & CRYPTO_ACOMP_REQ_DST_FOLIO)
		acomp_request_set_dst_folio(req, state->dfolio, state->doff, dlen);
}

static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
					  CRYPTO_ACOMP_REQ_DST_VIRT |
					  CRYPTO_ACOMP_REQ_SRC_FOLIO |
					  CRYPTO_ACOMP_REQ_DST_FOLIO);

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	} else if (acomp_request_src_isfolio(req)) {
		struct folio *folio = req->sfolio;
		unsigned int slen = req->slen;
		size_t off = req->soff;

		state->sfolio = folio;
		state->soff = off;
		sg_init_table(&state->ssg, 1);
		sg_set_page(&state->ssg, folio_page(folio, off / PAGE_SIZE),
			    slen, off % PAGE_SIZE);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	} else if (acomp_request_dst_isfolio(req)) {
		struct folio *folio = req->dfolio;
		unsigned int dlen = req->dlen;
		size_t off = req->doff;

		state->dfolio = folio;
		state->doff = off;
		sg_init_table(&state->dsg, 1);
		sg_set_page(&state->dsg, folio_page(folio, off / PAGE_SIZE),
			    dlen, off % PAGE_SIZE);
		/* Install the destination SG list as the destination. */
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}
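
/*
 * Illustrative note: callers are not limited to scatterlists.  A caller
 * that owns plain, DMA-capable linear buffers (src_buf/dst_buf below are
 * placeholders) may set them directly and let acomp_virt_to_sg() above
 * wrap each buffer in a single-entry SG list for SG-only implementations:
 *
 *	acomp_request_set_src_dma(req, src_buf, src_len);
 *	acomp_request_set_dst_dma(req, dst_buf, dst_len);
 *	err = crypto_acomp_compress(req);
 *
 * Folio-based sources and destinations are handled the same way via
 * acomp_request_set_src_folio()/acomp_request_set_dst_folio().
 */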

static int acomp_do_nondma(struct acomp_req_chain *state,
			   struct acomp_req *req)
{
	u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT |
		   CRYPTO_ACOMP_REQ_SRC_NONDMA |
		   CRYPTO_ACOMP_REQ_DST_VIRT |
		   CRYPTO_ACOMP_REQ_DST_NONDMA;
	ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
	int err;

	acomp_request_set_callback(fbreq, req->base.flags, NULL, NULL);
	fbreq->base.flags &= ~keep;
	fbreq->base.flags |= req->base.flags & keep;
	fbreq->src = req->src;
	fbreq->dst = req->dst;
	fbreq->slen = req->slen;
	fbreq->dlen = req->dlen;

	if (state->op == crypto_acomp_reqtfm(req)->compress)
		err = crypto_acomp_compress(fbreq);
	else
		err = crypto_acomp_decompress(fbreq);

	req->dlen = fbreq->dlen;
	return err;
}

static int acomp_do_one_req(struct acomp_req_chain *state,
			    struct acomp_req *req)
{
	state->cur = req;

	if (acomp_request_isnondma(req))
		return acomp_do_nondma(state, req);

	acomp_virt_to_sg(req);
	return state->op(req);
}

static int acomp_reqchain_finish(struct acomp_req_chain *state,
				 int err, u32 mask)
{
	struct acomp_req *req0 = state->req0;
	struct acomp_req *req = state->cur;
	struct acomp_req *n;

	acomp_reqchain_virt(state, err);

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = acomp_reqchain_done;
		req->base.data = state;

		err = acomp_do_one_req(state, req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head))
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		acomp_reqchain_virt(state, err);
		list_add_tail(&req->base.list, &req0->base.list);
	}

	acomp_restore_req(state);

out:
	return err;
}

static void acomp_reqchain_done(void *data, int err)
{
	struct acomp_req_chain *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head))
			return;
		goto notify;
	}

	err = acomp_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}

static int acomp_do_req_chain(struct acomp_req *req,
			      int (*op)(struct acomp_req *req))
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state = &req->chain;
	int err;

	if (crypto_acomp_req_chain(tfm) ||
	    (!acomp_request_chained(req) && acomp_request_issg(req)))
		return op(req);

	if (acomp_is_async(tfm)) {
		acomp_save_req(req, acomp_reqchain_done);
		state = req->base.data;
	}

	state->op = op;
	state->src = NULL;
	INIT_LIST_HEAD(&state->head);
	list_splice_init(&req->base.list, &state->head);

	err = acomp_do_one_req(state, req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return acomp_reqchain_finish(state, err, ~0);
}

int crypto_acomp_compress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

int crypto_acomp_decompress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
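
/*
 * Illustrative registration sketch (placeholder names, not part of this
 * file): an acomp implementation fills in struct acomp_alg and registers
 * it through crypto_register_acomp() below, which fixes up cra_type and
 * the type flag before handing the algorithm to the crypto core:
 *
 *	static struct acomp_alg my_acomp_alg = {
 *		.compress	= my_compress,
 *		.decompress	= my_decompress,
 *		.base		= {
 *			.cra_name		= "deflate",
 *			.cra_driver_name	= "deflate-mydriver",
 *			.cra_module		= THIS_MODULE,
 *		},
 *	};
 *
 *	err = crypto_register_acomp(&my_acomp_alg);
 */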

void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");