/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H

#include <linux/atomic.h>
#include <linux/args.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>

/* Set this bit if source is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_SRC_VIRT	0x00000002

/* Set this bit if the virtual address source cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_SRC_NONDMA	0x00000004

/* Set this bit if destination is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_DST_VIRT	0x00000008

/* Set this bit if the virtual address destination cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_DST_NONDMA	0x00000010

/* Set this bit if source is a folio. */
#define CRYPTO_ACOMP_REQ_SRC_FOLIO	0x00000020

/* Set this bit if destination is a folio. */
#define CRYPTO_ACOMP_REQ_DST_FOLIO	0x00000040

#define CRYPTO_ACOMP_DST_MAX		131072

#define MAX_SYNC_COMP_REQSIZE		0

#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \
	char __##name##_req[sizeof(struct acomp_req) + \
			    MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
	struct acomp_req *name = acomp_request_on_stack_init( \
		__##name##_req, (tfm), (gfp), false)
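
/*
 * Example: a minimal sketch of a one-off, blocking compression using an
 * on-stack request.  This is illustrative only: @tfm is assumed to have
 * been allocated elsewhere with crypto_alloc_acomp(), and @src/@dst are
 * assumed to be DMA-capable linear buffers (e.g. kmalloc() memory).
 *
 *	static int example_compress(struct crypto_acomp *tfm,
 *				    const u8 *src, unsigned int slen,
 *				    u8 *dst, unsigned int dlen)
 *	{
 *		DECLARE_CRYPTO_WAIT(wait);
 *		ACOMP_REQUEST_ALLOC(req, tfm, GFP_KERNEL);
 *		int err;
 *
 *		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *		acomp_request_set_src_dma(req, src, slen);
 *		acomp_request_set_dst_dma(req, dst, dlen);
 *
 *		err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *		if (!err)
 *			err = req->dlen;	// bytes of compressed output
 *		acomp_request_free(req);	// no-op if the request is on the stack
 *		return err;
 *	}
 */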

struct acomp_req;
struct folio;

struct acomp_req_chain {
	struct list_head head;
	struct acomp_req *req0;
	struct acomp_req *cur;
	int (*op)(struct acomp_req *req);
	crypto_completion_t compl;
	void *data;
	struct scatterlist ssg;
	struct scatterlist dsg;
	union {
		const u8 *src;
		struct folio *sfolio;
	};
	union {
		u8 *dst;
		struct folio *dfolio;
	};
	size_t soff;
	size_t doff;
	u32 flags;
};

/**
 * struct acomp_req - asynchronous (de)compression request
 *
 * @base:	Common attributes for asynchronous crypto requests
 * @src:	Source scatterlist
 * @dst:	Destination scatterlist
 * @svirt:	Source virtual address
 * @dvirt:	Destination virtual address
 * @sfolio:	Source folio
 * @soff:	Source folio offset
 * @dfolio:	Destination folio
 * @doff:	Destination folio offset
 * @slen:	Size of the input buffer
 * @dlen:	Size of the output buffer and number of bytes produced
 * @chain:	Private API code data, do not use
 * @__ctx:	Start of private context data
 */
struct acomp_req {
	struct crypto_async_request base;
	union {
		struct scatterlist *src;
		const u8 *svirt;
		struct folio *sfolio;
	};
	union {
		struct scatterlist *dst;
		u8 *dvirt;
		struct folio *dfolio;
	};
	size_t soff;
	size_t doff;
	unsigned int slen;
	unsigned int dlen;

	struct acomp_req_chain chain;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct crypto_acomp - user-instantiated objects which encapsulate
 * algorithms and core processing logic
 *
 * @compress:		Function performs a compress operation
 * @decompress:		Function performs a de-compress operation
 * @reqsize:		Context size for (de)compression requests
 * @fb:			Synchronous fallback tfm
 * @base:		Common crypto API algorithm data structure
 */
struct crypto_acomp {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	unsigned int reqsize;
	struct crypto_acomp *fb;
	struct crypto_tfm base;
};

#define COMP_ALG_COMMON {			\
	struct crypto_alg base;			\
}
struct comp_alg_common COMP_ALG_COMMON;

/**
 * DOC: Asynchronous Compression API
 *
 * The Asynchronous Compression API is used with the algorithms of type
 * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
 */

/**
 * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 *
 * Allocate a handle for a compression algorithm. The returned struct
 * crypto_acomp is the handle that is required for any subsequent
 * API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask);
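
/*
 * Example: a sketch of the basic usage pattern with scatterlists.  The
 * algorithm name "deflate", the buffer handling and the error handling
 * below are illustrative assumptions; @src and @dst are assumed to point
 * to DMA-capable memory (e.g. kmalloc() buffers).
 *
 *	static int example_deflate(const void *src, unsigned int slen,
 *				   void *dst, unsigned int dlen)
 *	{
 *		struct scatterlist sg_src, sg_dst;
 *		struct crypto_acomp *tfm;
 *		struct acomp_req *req;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int err;
 *
 *		tfm = crypto_alloc_acomp("deflate", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		req = acomp_request_alloc(tfm);
 *		if (!req) {
 *			crypto_free_acomp(tfm);
 *			return -ENOMEM;
 *		}
 *
 *		sg_init_one(&sg_src, src, slen);
 *		sg_init_one(&sg_dst, dst, dlen);
 *		acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
 *		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *
 *		err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *		if (!err)
 *			err = req->dlen;	// bytes of compressed output
 *
 *		acomp_request_free(req);
 *		crypto_free_acomp(tfm);
 *		return err;
 *	}
 */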
/**
 * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 * @node:	specifies the NUMA node the ZIP hardware belongs to
 *
 * Allocate a handle for a compression algorithm. Drivers should try to use
 * (de)compressors on the specified NUMA node.
 * The returned struct crypto_acomp is the handle that is required for any
 * subsequent API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					     u32 mask, int node);

static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
	return &tfm->base;
}

static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}

static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_acomp, base);
}

static inline struct comp_alg_common *crypto_comp_alg_common(
	struct crypto_acomp *tfm)
{
	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}

static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
{
	return tfm->reqsize;
}

static inline void acomp_request_set_tfm(struct acomp_req *req,
					 struct crypto_acomp *tfm)
{
	req->base.tfm = crypto_acomp_tfm(tfm);
}

static inline bool acomp_is_async(struct crypto_acomp *tfm)
{
	return crypto_comp_alg_common(tfm)->base.cra_flags &
	       CRYPTO_ALG_ASYNC;
}

static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
	return __crypto_acomp_tfm(req->base.tfm);
}

/**
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
}

static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
	mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
}

static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
}

/**
 * acomp_request_alloc() -- allocates asynchronous (de)compression request
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 * @gfp:	gfp to pass to kzalloc (defaults to GFP_KERNEL)
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
static inline struct acomp_req *acomp_request_alloc_extra_noprof(
	struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
{
	struct acomp_req *req;
	size_t len;

	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
	if (check_add_overflow(len, extra, &len))
		return NULL;

	req = kzalloc_noprof(len, gfp);
	if (likely(req))
		acomp_request_set_tfm(req, tfm);
	return req;
}
#define acomp_request_alloc_noprof(tfm, ...) \
	CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
		tfm, ##__VA_ARGS__)
#define acomp_request_alloc_noprof_0(tfm) \
	acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
#define acomp_request_alloc_noprof_1(tfm, gfp) \
	acomp_request_alloc_extra_noprof(tfm, 0, gfp)
#define acomp_request_alloc(...)	alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))

/**
 * acomp_request_alloc_extra() -- allocate acomp request with extra memory
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 * @extra:	amount of extra memory
 * @gfp:	gfp to pass to kzalloc
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
#define acomp_request_alloc_extra(...)	alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))

static inline void *acomp_request_extra(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	size_t len;

	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
	return (void *)((char *)req + len);
}
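
/*
 * Example: a sketch of carrying per-request caller state in the extra
 * tail memory.  "struct example_ctx" and "example_alloc" are hypothetical
 * names used only for illustration.
 *
 *	struct example_ctx {
 *		void *cookie;
 *	};
 *
 *	static struct acomp_req *example_alloc(struct crypto_acomp *tfm,
 *					       void *cookie)
 *	{
 *		struct acomp_req *req;
 *		struct example_ctx *ctx;
 *
 *		req = acomp_request_alloc_extra(tfm, sizeof(*ctx), GFP_KERNEL);
 *		if (!req)
 *			return NULL;
 *
 *		ctx = acomp_request_extra(req);	// points at the extra tail memory
 *		ctx->cookie = cookie;
 *		return req;
 *	}
 */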

/**
 * acomp_request_free() -- zeroize and free asynchronous (de)compression
 *			   request as well as the output buffer if allocated
 *			   inside the algorithm
 *
 * @req:	request to free
 */
static inline void acomp_request_free(struct acomp_req *req)
{
	if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK))
		return;
	kfree_sensitive(req);
}

/**
 * acomp_request_set_callback() -- Sets an asynchronous callback
 *
 * Callback will be called when an asynchronous operation on a given
 * request is finished.
 *
 * @req:	request that the callback will be set for
 * @flgs:	specify for instance if the operation may backlog
 * @cmpl:	callback which will be called
 * @data:	private data used by the caller
 */
static inline void acomp_request_set_callback(struct acomp_req *req,
					      u32 flgs,
					      crypto_completion_t cmpl,
					      void *data)
{
	u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA |
		   CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA |
		   CRYPTO_ACOMP_REQ_SRC_FOLIO | CRYPTO_ACOMP_REQ_DST_FOLIO |
		   CRYPTO_TFM_REQ_ON_STACK;

	req->base.complete = cmpl;
	req->base.data = data;
	req->base.flags &= keep;
	req->base.flags |= flgs & ~keep;

	crypto_reqchain_init(&req->base);
}

/**
 * acomp_request_set_params() -- Sets request parameters
 *
 * Sets parameters required by an acomp operation
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @dst:	pointer to output buffer scatterlist. If this is NULL, the
 *		acomp layer will allocate the output memory
 * @slen:	size of the input buffer
 * @dlen:	size of the output buffer. If dst is NULL, this can be used by
 *		the user to specify the maximum amount of memory to allocate
 */
static inline void acomp_request_set_params(struct acomp_req *req,
					    struct scatterlist *src,
					    struct scatterlist *dst,
					    unsigned int slen,
					    unsigned int dlen)
{
	req->src = src;
	req->dst = dst;
	req->slen = slen;
	req->dlen = dlen;

	req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
			     CRYPTO_ACOMP_REQ_SRC_NONDMA |
			     CRYPTO_ACOMP_REQ_SRC_FOLIO |
			     CRYPTO_ACOMP_REQ_DST_FOLIO |
			     CRYPTO_ACOMP_REQ_DST_VIRT |
			     CRYPTO_ACOMP_REQ_DST_NONDMA);
}

/**
 * acomp_request_set_src_sg() -- Sets source scatterlist
 *
 * Sets source scatterlist required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_sg(struct acomp_req *req,
					    struct scatterlist *src,
					    unsigned int slen)
{
	req->src = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
}

/**
 * acomp_request_set_src_dma() -- Sets DMA source virtual address
 *
 * Sets source virtual address required by an acomp operation.
 * The address must be usable for DMA.
 *
 * @req:	asynchronous compress request
 * @src:	virtual address pointer to input buffer
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_dma(struct acomp_req *req,
					     const u8 *src, unsigned int slen)
{
	req->svirt = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
 *
 * Sets source virtual address required by an acomp operation.
 * The address can not be used for DMA.
 *
 * @req:	asynchronous compress request
 * @src:	virtual address pointer to input buffer
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_nondma(struct acomp_req *req,
						const u8 *src,
						unsigned int slen)
{
	req->svirt = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_folio() -- Sets source folio
 *
 * Sets source folio required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @folio:	pointer to input folio
 * @off:	input folio offset
 * @len:	size of the input buffer
 */
static inline void acomp_request_set_src_folio(struct acomp_req *req,
					       struct folio *folio, size_t off,
					       unsigned int len)
{
	req->sfolio = folio;
	req->soff = off;
	req->slen = len;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_FOLIO;
}

/**
 * acomp_request_set_dst_sg() -- Sets destination scatterlist
 *
 * Sets destination scatterlist required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @dst:	pointer to output buffer scatterlist
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_sg(struct acomp_req *req,
					    struct scatterlist *dst,
					    unsigned int dlen)
{
	req->dst = dst;
	req->dlen = dlen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
}
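
/*
 * Example: a sketch of decompressing directly out of a folio into a
 * caller-provided scatterlist.  The parameter names and the completion
 * handling mirror the earlier examples and are illustrative assumptions.
 *
 *	static int example_decompress_folio(struct crypto_acomp *tfm,
 *					    struct folio *folio, size_t off,
 *					    unsigned int slen,
 *					    struct scatterlist *dst,
 *					    unsigned int dlen)
 *	{
 *		DECLARE_CRYPTO_WAIT(wait);
 *		struct acomp_req *req;
 *		int err;
 *
 *		req = acomp_request_alloc(tfm);
 *		if (!req)
 *			return -ENOMEM;
 *
 *		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *		acomp_request_set_src_folio(req, folio, off, slen);
 *		acomp_request_set_dst_sg(req, dst, dlen);
 *
 *		err = crypto_wait_req(crypto_acomp_decompress(req), &wait);
 *		if (!err)
 *			err = req->dlen;	// bytes of decompressed output
 *		acomp_request_free(req);
 *		return err;
 *	}
 */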

/**
 * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
 *
 * Sets destination virtual address required by an acomp operation.
 * The address must be usable for DMA.
 *
 * @req:	asynchronous compress request
 * @dst:	virtual address pointer to output buffer
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_dma(struct acomp_req *req,
					     u8 *dst, unsigned int dlen)
{
	req->dvirt = dst;
	req->dlen = dlen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}

/**
 * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
 *
 * Sets destination virtual address required by an acomp operation.
 * The address can not be used for DMA.
 *
 * @req:	asynchronous compress request
 * @dst:	virtual address pointer to output buffer
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
						u8 *dst, unsigned int dlen)
{
	req->dvirt = dst;
	req->dlen = dlen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}
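
/*
 * Example: a sketch for linear buffers that cannot be used for DMA, for
 * instance buffers on the kernel stack.  The _nondma setters differ from
 * the _dma ones only in the flags they set; the names below are
 * illustrative assumptions.
 *
 *	static int example_compress_nondma(struct crypto_acomp *tfm,
 *					   const u8 *src, unsigned int slen,
 *					   u8 *dst, unsigned int dlen)
 *	{
 *		DECLARE_CRYPTO_WAIT(wait);
 *		struct acomp_req *req;
 *		int err;
 *
 *		req = acomp_request_alloc(tfm);
 *		if (!req)
 *			return -ENOMEM;
 *
 *		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *		// Tell the implementation it must not DMA to/from these buffers.
 *		acomp_request_set_src_nondma(req, src, slen);
 *		acomp_request_set_dst_nondma(req, dst, dlen);
 *
 *		err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *		if (!err)
 *			err = req->dlen;
 *		acomp_request_free(req);
 *		return err;
 *	}
 */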

/**
 * acomp_request_set_dst_folio() -- Sets destination folio
 *
 * Sets destination folio required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @folio:	pointer to output folio
 * @off:	output folio offset
 * @len:	size of the output buffer
 */
static inline void acomp_request_set_dst_folio(struct acomp_req *req,
					       struct folio *folio, size_t off,
					       unsigned int len)
{
	req->dfolio = folio;
	req->doff = off;
	req->dlen = len;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_FOLIO;
}

static inline void acomp_request_chain(struct acomp_req *req,
				       struct acomp_req *head)
{
	crypto_request_chain(&req->base, &head->base);
}

/**
 * crypto_acomp_compress() -- Invoke asynchronous compress operation
 *
 * Function invokes the asynchronous compress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_compress(struct acomp_req *req);

/**
 * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
 *
 * Function invokes the asynchronous decompress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_decompress(struct acomp_req *req);

static inline struct acomp_req *acomp_request_on_stack_init(
	char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly)
{
	struct acomp_req *req;

	if (!stackonly && (req = acomp_request_alloc(tfm, gfp)))
		return req;

	req = (void *)buf;
	acomp_request_set_tfm(req, tfm->fb);
	req->base.flags = CRYPTO_TFM_REQ_ON_STACK;

	return req;
}

#endif