/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H

#include <linux/atomic.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>

/*
 * Set this bit if the acomp layer should allocate the destination buffer
 * itself (set automatically by acomp_request_set_params() when dst is NULL).
 */
#define CRYPTO_ACOMP_ALLOC_OUTPUT	0x00000001

/* Set this bit if source is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_SRC_VIRT	0x00000002

/* Set this bit if virtual address source cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_SRC_NONDMA	0x00000004

/* Set this bit if destination is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_DST_VIRT	0x00000008

/* Set this bit if virtual address destination cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_DST_NONDMA	0x00000010

/*
 * Size limit for a self-allocated destination buffer (128 KiB).  Not
 * referenced in this header; presumably enforced by the acomp core when
 * CRYPTO_ACOMP_ALLOC_OUTPUT is set — confirm against the implementation.
 */
#define CRYPTO_ACOMP_DST_MAX		131072

struct acomp_req;

/*
 * Private request-chaining state embedded in every acomp_req (see
 * struct acomp_req::chain).  Managed entirely by the API core — users
 * must not touch it.  Member semantics are defined by the acomp core,
 * not this header; ssg/dsg appear to be scratch scatterlists used to
 * wrap virtual src/dst addresses — verify against the core code.
 */
struct acomp_req_chain {
	struct list_head head;
	struct acomp_req *req0;
	struct acomp_req *cur;
	int (*op)(struct acomp_req *req);
	crypto_completion_t compl;
	void *data;
	struct scatterlist ssg;
	struct scatterlist dsg;
	const u8 *src;
	u8 *dst;
};

/**
 * struct acomp_req - asynchronous (de)compression request
 *
 * @base:	Common attributes for asynchronous crypto requests
 * @src:	Source scatterlist; valid unless CRYPTO_ACOMP_REQ_SRC_VIRT
 *		is set in base.flags
 * @svirt:	Source virtual address; valid when CRYPTO_ACOMP_REQ_SRC_VIRT
 *		is set in base.flags
 * @dst:	Destination scatterlist; valid unless CRYPTO_ACOMP_REQ_DST_VIRT
 *		is set in base.flags
 * @dvirt:	Destination virtual address; valid when
 *		CRYPTO_ACOMP_REQ_DST_VIRT is set in base.flags
 * @slen:	Size of the input buffer
 * @dlen:	Size of the output buffer and number of bytes produced
 * @chain:	Private API code data, do not use
 * @__ctx:	Start of private context data
 */
struct acomp_req {
	struct crypto_async_request base;
	union {
		struct scatterlist *src;
		const u8 *svirt;
	};
	union {
		struct scatterlist *dst;
		u8 *dvirt;
	};
	unsigned int slen;
	unsigned int dlen;

	struct acomp_req_chain chain;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct crypto_acomp - user-instantiated objects which encapsulate
 * algorithms and core processing logic
 *
 * @compress:	Function performs a compress operation
 * @decompress:	Function performs a de-compress operation
 * @dst_free:	Frees destination buffer if allocated inside the
 *		algorithm
 * @reqsize:	Context size for (de)compression requests
 * @base:	Common crypto API algorithm data structure
 */
struct crypto_acomp {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	void (*dst_free)(struct scatterlist *dst);
	unsigned int reqsize;
	struct crypto_tfm base;
};

/*
 * Per-stream state of an acomp algorithm: @lock serializes access to
 * the algorithm-private @ctx data.
 */
struct crypto_acomp_stream {
	spinlock_t lock;
	void *ctx;
};

/*
 * Fields shared by all compression algorithm types; expanded as the body
 * of struct comp_alg_common below (and, presumably, of other *_alg
 * structs elsewhere — hence the macro).
 */
#define COMP_ALG_COMMON {			\
	struct crypto_alg base;			\
	struct crypto_acomp_stream __percpu *stream;	\
}
struct comp_alg_common COMP_ALG_COMMON;

/**
 * DOC: Asynchronous Compression API
 *
 * The Asynchronous Compression API is used with the algorithms of type
 * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
 */

/**
 * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 *
 * Allocate a handle for a compression algorithm. The returned struct
 * crypto_acomp is the handle that is required for any subsequent
 * API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask);

/**
 * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 * @node:	specifies the NUMA node the ZIP hardware belongs to
 *
 * Allocate a handle for a compression algorithm. Drivers should try to use
 * (de)compressors on the specified NUMA node.
 * The returned struct crypto_acomp is the handle that is required for any
 * subsequent API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					     u32 mask, int node);

/* Return the generic crypto_tfm embedded in an acomp handle. */
static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
	return &tfm->base;
}

/* Internal: downcast a crypto_alg to its enclosing comp_alg_common. */
static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}

/* Internal: downcast a crypto_tfm to its enclosing crypto_acomp. */
static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_acomp, base);
}

/* Return the comp_alg_common for the algorithm backing @tfm. */
static inline struct comp_alg_common *crypto_comp_alg_common(
	struct crypto_acomp *tfm)
{
	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}

/* Size of the per-request private context required by @tfm. */
static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
{
	return tfm->reqsize;
}

/* Bind @req to the transform @tfm that will process it. */
static inline void acomp_request_set_tfm(struct acomp_req *req,
					 struct crypto_acomp *tfm)
{
	req->base.tfm = crypto_acomp_tfm(tfm);
}

/* True if the algorithm behind @tfm completes asynchronously. */
static inline bool acomp_is_async(struct crypto_acomp *tfm)
{
	return crypto_comp_alg_common(tfm)->base.cra_flags &
	       CRYPTO_ALG_ASYNC;
}

/* Return the acomp transform a request was set up for. */
static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
	return __crypto_acomp_tfm(req->base.tfm);
}

/**
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
}

/**
 * crypto_has_acomp() -- check availability of an acomp algorithm
 *
 * @alg_name:	name of the compression algorithm, e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 *
 * The type/mask are forced to the ACOMPRESS algorithm type before the
 * lookup, so only acomp algorithms can match.
 *
 * Return: non-zero when the algorithm is available; zero otherwise
 */
static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
	mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

/**
 * acomp_request_alloc() -- allocates asynchronous (de)compression request
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
static inline struct acomp_req *acomp_request_alloc_noprof(struct crypto_acomp *tfm)
{
	struct acomp_req *req;

	/* Zeroed allocation; trailing space is the tfm's private context. */
	req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
	if (likely(req))
		acomp_request_set_tfm(req, tfm);
	return req;
}
#define acomp_request_alloc(...)	alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))

/**
 * acomp_request_free() -- zeroize and free asynchronous (de)compression
 *			   request as well as the output buffer if allocated
 *			   inside the algorithm
 *
 * @req:	request to free
 */
static inline void acomp_request_free(struct acomp_req *req)
{
	kfree_sensitive(req);
}

/**
 * acomp_request_set_callback() -- Sets an asynchronous callback
 *
 * Callback will be called when an asynchronous operation on a given
 * request is finished.
 *
 * @req:	request that the callback will be set for
 * @flgs:	specify for instance if the operation may backlog
 * @cmpl:	callback which will be called
 * @data:	private data used by the caller
 */
static inline void acomp_request_set_callback(struct acomp_req *req,
					      u32 flgs,
					      crypto_completion_t cmpl,
					      void *data)
{
	/*
	 * The src/dst addressing bits belong to the request setup, not the
	 * caller: preserve them and take every other flag from @flgs.
	 */
	u32 keep = CRYPTO_ACOMP_ALLOC_OUTPUT | CRYPTO_ACOMP_REQ_SRC_VIRT |
		   CRYPTO_ACOMP_REQ_SRC_NONDMA | CRYPTO_ACOMP_REQ_DST_VIRT |
		   CRYPTO_ACOMP_REQ_DST_NONDMA;

	req->base.complete = cmpl;
	req->base.data = data;
	req->base.flags &= keep;
	req->base.flags |= flgs & ~keep;

	crypto_reqchain_init(&req->base);
}

/**
 * acomp_request_set_params() -- Sets request parameters
 *
 * Sets parameters required by an acomp operation
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @dst:	pointer to output buffer scatterlist. If this is NULL, the
 *		acomp layer will allocate the output memory
 * @slen:	size of the input buffer
 * @dlen:	size of the output buffer. If dst is NULL, this can be used by
 *		the user to specify the maximum amount of memory to allocate
 */
static inline void acomp_request_set_params(struct acomp_req *req,
					    struct scatterlist *src,
					    struct scatterlist *dst,
					    unsigned int slen,
					    unsigned int dlen)
{
	req->src = src;
	req->dst = dst;
	req->slen = slen;
	req->dlen = dlen;

	/* Both sides are SG lists now: clear every virtual-address flag. */
	req->base.flags &= ~(CRYPTO_ACOMP_ALLOC_OUTPUT |
			     CRYPTO_ACOMP_REQ_SRC_VIRT |
			     CRYPTO_ACOMP_REQ_SRC_NONDMA |
			     CRYPTO_ACOMP_REQ_DST_VIRT |
			     CRYPTO_ACOMP_REQ_DST_NONDMA);
	if (!req->dst)
		req->base.flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}

/**
 * acomp_request_set_src_sg() -- Sets source scatterlist
 *
 * Sets source scatterlist required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_sg(struct acomp_req *req,
					    struct scatterlist *src,
					    unsigned int slen)
{
	req->src = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_dma() -- Sets DMA source virtual address
 *
 * Sets source virtual address required by an acomp operation.
 * The address must be usable for DMA.
 *
 * @req:	asynchronous compress request
 * @src:	virtual address pointer to input buffer
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_dma(struct acomp_req *req,
					     const u8 *src, unsigned int slen)
{
	req->svirt = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
 *
 * Sets source virtual address required by an acomp operation.
 * The address can not be used for DMA.
 *
 * @req:	asynchronous compress request
 * @src:	virtual address pointer to input buffer
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_nondma(struct acomp_req *req,
						const u8 *src,
						unsigned int slen)
{
	req->svirt = src;
	req->slen = slen;

	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_dst_sg() -- Sets destination scatterlist
 *
 * Sets destination scatterlist required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @dst:	pointer to output buffer scatterlist
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_sg(struct acomp_req *req,
					    struct scatterlist *dst,
					    unsigned int dlen)
{
	req->dst = dst;
	req->dlen = dlen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
}

/**
 * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
 *
 * Sets destination virtual address required by an acomp operation.
 * The address must be usable for DMA.
 *
 * @req:	asynchronous compress request
 * @dst:	virtual address pointer to output buffer
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_dma(struct acomp_req *req,
					     u8 *dst, unsigned int dlen)
{
	req->dvirt = dst;
	req->dlen = dlen;

	/* Caller supplies the buffer: self-allocation no longer applies. */
	req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}

/**
 * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
 *
 * Sets destination virtual address required by an acomp operation.
 * The address can not be used for DMA.
 *
 * @req:	asynchronous compress request
 * @dst:	virtual address pointer to output buffer
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
						u8 *dst, unsigned int dlen)
{
	req->dvirt = dst;
	req->dlen = dlen;

	/* Caller supplies the buffer: self-allocation no longer applies. */
	req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}

/* Link @req onto @head's request chain (see crypto_request_chain()). */
static inline void acomp_request_chain(struct acomp_req *req,
				       struct acomp_req *head)
{
	crypto_request_chain(&req->base, &head->base);
}

/**
 * crypto_acomp_compress() -- Invoke asynchronous compress operation
 *
 * Function invokes the asynchronous compress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_compress(struct acomp_req *req);

/**
 * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
 *
 * Function invokes the asynchronous decompress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_decompress(struct acomp_req *req);

#endif