| /linux/fs/btrfs/ |
| zlib.c | 26 /* workspace buffer size for s390 zlib hardware support */ 29 struct workspace { struct 40 struct workspace *workspace = list_entry(ws, struct workspace, list); in zlib_get_workspace() local 42 workspace->level = level; in zlib_get_workspace() 49 struct workspace *workspace = list_entry(ws, struct workspace, list); in zlib_free_workspace() local 51 kvfree(workspace in zlib_free_workspace() 75 struct workspace *workspace; zlib_alloc_workspace() local 116 copy_data_into_buffer(struct address_space * mapping,struct workspace * workspace,u64 filepos,unsigned long length) copy_data_into_buffer() argument 153 struct workspace *workspace = list_entry(ws, struct workspace, list); zlib_compress_folios() local 340 struct workspace *workspace = list_entry(ws, struct workspace, list); zlib_decompress_bio() local 438 struct workspace *workspace = list_entry(ws, struct workspace, list); zlib_decompress() local [all...] |
| zstd.c | 45 struct workspace { struct 65 * workspace. argument 67 * Getting a workspace is done by using the bitmap to identify the levels that 69 * workspaces because of the monotonic memory guarantee. A workspace's 71 * level. Putting a workspace involves adding it back to the appropriate places 90 static inline struct workspace *list_to_workspace(struct list_head *list) in list_to_workspace() 92 return container_of(list, struct workspace, list); in list_to_workspace() 105 * This scans the lru_list and attempts to reclaim any workspace that hasn't 125 struct workspace *victim = container_of(pos, struct workspace, in zstd_reclaim_timer_fn() 214 struct workspace *workspace; zstd_free_workspace_manager() local 250 struct workspace *workspace; zstd_find_workspace() local 333 struct workspace *workspace = list_to_workspace(ws); zstd_put_workspace() local 364 struct workspace *workspace = list_entry(ws, struct workspace, list); zstd_free_workspace() local 374 struct workspace *workspace; zstd_alloc_workspace() local 404 struct workspace *workspace = list_entry(ws, struct workspace, list); zstd_compress_folios() local 591 struct workspace *workspace = list_entry(ws, struct workspace, list); zstd_decompress_bio() local 680 struct workspace *workspace = list_entry(ws, struct workspace, list); zstd_decompress() local [all...] |
| lzo.c | 61 struct workspace { struct 79 struct workspace *workspace = list_entry(ws, struct workspace, list); in lzo_free_workspace() local 81 kvfree(workspace->buf); in lzo_free_workspace() 82 kvfree(workspace->cbuf); in lzo_free_workspace() 83 kvfree(workspace->mem); in lzo_free_workspace() 84 kfree(workspace); in lzo_free_workspace() 89 struct workspace *workspace; in lzo_alloc_workspace() local 222 struct workspace *workspace = list_entry(ws, struct workspace, list); lzo_compress_folios() local 341 struct workspace *workspace = list_entry(ws, struct workspace, list); lzo_decompress_bio() local 453 struct workspace *workspace = list_entry(ws, struct workspace, list); lzo_decompress() local [all...] |
| compression.c | 651 struct heuristic_ws *workspace; 653 workspace = list_entry(ws, struct heuristic_ws, list); 655 kvfree(workspace->sample); 656 kfree(workspace->bucket); 657 kfree(workspace->bucket_b); 658 kfree(workspace); 734 struct list_head *workspace; 748 * Preallocate one workspace for each compression type so we can in alloc_workspace() 751 workspace = alloc_workspace(fs_info, type, 0); in alloc_workspace() 752 if (IS_ERR(workspace)) { in alloc_workspace() 695 struct heuristic_ws *workspace; free_heuristic_ws() local 778 struct list_head *workspace; alloc_workspace_manager() local 837 struct list_head *workspace; btrfs_get_workspace() local 1055 struct list_head *workspace; btrfs_compress_folios() local 1071 struct list_head *workspace; btrfs_decompress_bio() local 1094 struct list_head *workspace; btrfs_decompress() local [all...] |
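The btrfs rows above all revolve around one pattern: each compression type keeps a pool of preallocated `struct workspace` objects that the compress/decompress paths borrow via a get call and hand back via a put call. A minimal sketch of that pooling idea on top of kernel list and lock primitives follows; it is not the actual btrfs code, and `ws_pool`, `ws_node`, `ws_get` and `ws_put` are invented names.

```c
/* Illustrative only: a per-type workspace pool in the spirit of the btrfs
 * hits above (get from an idle list, allocate on demand, put back for reuse).
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/mm.h>

struct ws_node {
	struct list_head list;
	void *buf;			/* algorithm-specific scratch memory */
};

struct ws_pool {
	struct list_head idle;		/* preallocated, currently unused workspaces */
	spinlock_t lock;
};

static struct ws_node *ws_get(struct ws_pool *pool, size_t bufsize)
{
	struct ws_node *ws = NULL;

	spin_lock(&pool->lock);
	if (!list_empty(&pool->idle)) {
		ws = list_first_entry(&pool->idle, struct ws_node, list);
		list_del(&ws->list);
	}
	spin_unlock(&pool->lock);
	if (ws)
		return ws;

	/* Pool is empty: fall back to a fresh allocation outside the lock. */
	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return NULL;
	ws->buf = kvmalloc(bufsize, GFP_KERNEL);
	if (!ws->buf) {
		kfree(ws);
		return NULL;
	}
	return ws;
}

static void ws_put(struct ws_pool *pool, struct ws_node *ws)
{
	spin_lock(&pool->lock);
	list_add(&ws->list, &pool->idle);	/* cache it for the next caller */
	spin_unlock(&pool->lock);
}
```

Keeping the buffers cached avoids repeated large allocations on every compressed extent, which is the point of the alloc_workspace_manager() and btrfs_get_workspace() hits in compression.c.
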
| /linux/lib/zstd/compress/ |
| zstd_preSplit.c | 157 void* workspace, size_t wkspSize) in ZSTD_splitBlock_byChunks() argument 164 FPStats* const fpstats = (FPStats*)workspace; in ZSTD_splitBlock_byChunks() 169 assert(workspace != NULL); in ZSTD_splitBlock_byChunks() 170 assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0); in ZSTD_splitBlock_byChunks() 200 void* workspace, size_t wkspSize) in ZSTD_splitBlock_fromBorders() argument 203 FPStats* const fpstats = (FPStats*)workspace; in ZSTD_splitBlock_fromBorders() 204 Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned)); in ZSTD_splitBlock_fromBorders() 206 assert(workspace != NULL); in ZSTD_splitBlock_fromBorders() 207 assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0); in ZSTD_splitBlock_fromBorders() 231 void* workspace, size_t wkspSize) in ZSTD_splitBlock() argument [all …]
|
| zstd_cwksp.h | 157 void* workspace; member 181 assert(ws->workspace <= ws->objectEnd); in ZSTD_cwksp_assert_internal_consistency() 188 assert(ws->workspace <= ws->initOnceStart); in ZSTD_cwksp_assert_internal_consistency() 338 return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd); in ZSTD_cwksp_owns_buffer() 558 return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); in ZSTD_cwksp_sizeof() 562 return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace) in ZSTD_cwksp_used() 574 ws->workspace = start; in ZSTD_cwksp_init() 576 ws->objectEnd = ws->workspace; in ZSTD_cwksp_init() 587 void* workspace = ZSTD_customMalloc(size, customMem); in ZSTD_cwksp_create() local 589 RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!"); in ZSTD_cwksp_create() [all …]
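The ZSTD_cwksp hits above describe what is essentially a bump allocator: `workspace` and `workspaceEnd` bound one caller-supplied buffer, and objects, tables and buffers are carved out of it sequentially. A toy version of that idea is sketched below; it is not the real cwksp API, and `struct arena` with its helpers is invented for illustration.

```c
/* Toy bump allocator over a single caller-provided buffer, illustrating the
 * concept behind ZSTD_cwksp (the real code also tracks object/table/buffer
 * phases, alignment classes and reuse, which this sketch omits). */
#include <stddef.h>
#include <stdint.h>

struct arena {
	uint8_t *base;		/* start of the workspace buffer */
	size_t   size;		/* total workspace size in bytes */
	size_t   used;		/* bytes handed out so far */
};

static void arena_init(struct arena *a, void *buf, size_t size)
{
	a->base = buf;		/* assumed suitably aligned by the caller */
	a->size = size;
	a->used = 0;
}

static void *arena_alloc(struct arena *a, size_t n)
{
	size_t aligned = (a->used + 7u) & ~(size_t)7u;	/* 8-byte align each object */

	if (aligned > a->size || n > a->size - aligned)
		return NULL;	/* workspace too small for this request */
	a->used = aligned + n;
	return a->base + aligned;
}
```

Because nothing is ever freed individually, sizing the workspace up front (as the ZSTD_estimate* helpers do) is what makes the scheme work.
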
|
| zstd_compress_superblock.c | 311 void* workspace, size_t wkspSize, in ZSTD_estimateSubBlockSize_literal() argument 314 unsigned* const countWksp = (unsigned*)workspace; in ZSTD_estimateSubBlockSize_literal() 321 … HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); in ZSTD_estimateSubBlockSize_literal() 336 void* workspace, size_t wkspSize) in ZSTD_estimateSubBlockSize_symbolType() argument 338 unsigned* const countWksp = (unsigned*)workspace; in ZSTD_estimateSubBlockSize_symbolType() 345 HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ in ZSTD_estimateSubBlockSize_symbolType() 372 void* workspace, size_t wkspSize, in ZSTD_estimateSubBlockSize_sequences() argument 381 workspace, wkspSize); in ZSTD_estimateSubBlockSize_sequences() 385 workspace, wkspSize); in ZSTD_estimateSubBlockSize_sequences() 389 workspace, wkspSize); in ZSTD_estimateSubBlockSize_sequences() [all …]
|
| zstd_compress.c | 83 ZSTD_cwksp workspace; member 124 ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize) in ZSTD_initStaticCCtx() argument 129 if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ in ZSTD_initStaticCCtx() 130 ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc); in ZSTD_initStaticCCtx() 136 ZSTD_cwksp_move(&cctx->workspace, &ws); in ZSTD_initStaticCCtx() 140 …if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedB… in ZSTD_initStaticCCtx() 141 …CBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_com… in ZSTD_initStaticCCtx() 142 …CBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_com… in ZSTD_initStaticCCtx() 143 cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE); in ZSTD_initStaticCCtx() 173 ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); in ZSTD_freeCCtxContent() [all …]
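ZSTD_initStaticCCtx(), shown above, lays the whole compression context out inside the caller's buffer through ZSTD_cwksp_init() and ZSTD_cwksp_reserve_object(), which is why it demands 8-byte alignment and simply returns NULL when the buffer is too small. Below is a hedged, userspace-flavoured sketch of the calling side using upstream libzstd's static-allocation API (these functions sit behind ZSTD_STATIC_LINKING_ONLY); error handling is deliberately abbreviated.

```c
/* Sketch: compress with a ZSTD_CCtx that lives entirely inside a
 * caller-provided workspace, so no internal allocation happens. */
#define ZSTD_STATIC_LINKING_ONLY	/* ZSTD_estimateCCtxSize/ZSTD_initStaticCCtx */
#include <zstd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char src[] = "example payload example payload example payload";
	int const level = 3;

	size_t const wksp_size = ZSTD_estimateCCtxSize(level);
	void *wksp = malloc(wksp_size);		/* malloc() is at least 8-byte aligned */
	size_t const dst_cap = ZSTD_compressBound(sizeof(src));
	void *dst = malloc(dst_cap);

	if (!wksp || !dst)
		return 1;

	ZSTD_CCtx *cctx = ZSTD_initStaticCCtx(wksp, wksp_size);
	if (!cctx)
		return 1;	/* workspace too small or misaligned */

	size_t const csize = ZSTD_compressCCtx(cctx, dst, dst_cap,
					       src, sizeof(src), level);
	if (ZSTD_isError(csize))
		return 1;
	printf("%zu -> %zu bytes\n", sizeof(src), csize);

	/* No ZSTD_freeCCtx(): the context lives inside wksp. */
	free(wksp);
	free(dst);
	return 0;
}
```

The kernel wrappers in zstd_compress_module.c below take exactly this route, just with the workspace supplied by their own callers instead of malloc().
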
|
| zstd_preSplit.h | 32 void* workspace, size_t wkspSize);
|
| /linux/lib/zstd/ |
| zstd_decompress_module.c | 74 zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size) in zstd_init_dctx() argument 76 if (workspace == NULL) in zstd_init_dctx() 78 return ZSTD_initStaticDCtx(workspace, workspace_size); in zstd_init_dctx() 104 zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace, in zstd_init_dstream() argument 107 if (workspace == NULL) in zstd_init_dstream() 110 return ZSTD_initStaticDStream(workspace, workspace_size); in zstd_init_dstream()
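zstd_init_dctx() above is a thin NULL-checking wrapper around ZSTD_initStaticDCtx(): the caller owns the workspace and the decompression context is placed inside it. A sketch of the caller side under the declarations in include/linux/zstd.h; the helper name and the error mapping are illustrative.

```c
/* Sketch: one-shot zstd decompression with a caller-owned workspace. */
#include <linux/zstd.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int example_zstd_decompress(void *dst, size_t dst_len,
				   const void *src, size_t src_len)
{
	size_t wksp_size = zstd_dctx_workspace_bound();
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_dctx *dctx;
	size_t ret;

	if (!wksp)
		return -ENOMEM;

	dctx = zstd_init_dctx(wksp, wksp_size);
	if (!dctx) {
		kvfree(wksp);
		return -EINVAL;
	}

	ret = zstd_decompress_dctx(dctx, dst, dst_len, src, src_len);
	kvfree(wksp);		/* the dctx lives inside wksp; nothing else to free */

	return zstd_is_error(ret) ? -EIO : 0;
}
```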
|
| zstd_compress_module.c | 154 zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size) in zstd_init_cctx() argument 156 if (workspace == NULL) in zstd_init_cctx() 158 return ZSTD_initStaticCCtx(workspace, workspace_size); in zstd_init_cctx() 213 unsigned long long pledged_src_size, void *workspace, size_t workspace_size) in zstd_init_cstream() argument 217 if (workspace == NULL) in zstd_init_cstream() 220 cstream = ZSTD_initStaticCStream(workspace, workspace_size); in zstd_init_cstream()
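The compression side mirrors it: size the workspace from the chosen parameters, place the cctx inside it, compress, and free only the workspace. The helper below is an illustrative sketch; the zstd_* calls are the kernel API from include/linux/zstd.h.

```c
/* Sketch: one-shot zstd compression with a caller-owned workspace. */
#include <linux/zstd.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int example_zstd_compress(void *dst, size_t dst_cap,
				 const void *src, size_t src_len, int level)
{
	zstd_parameters params = zstd_get_params(level, src_len);
	size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_cctx *cctx;
	size_t ret;

	if (!wksp)
		return -ENOMEM;

	cctx = zstd_init_cctx(wksp, wksp_size);
	if (!cctx) {
		kvfree(wksp);
		return -EINVAL;
	}

	ret = zstd_compress_cctx(cctx, dst, dst_cap, src, src_len, &params);
	kvfree(wksp);

	return zstd_is_error(ret) ? -EIO : (int)ret;	/* compressed size (sketch) */
}
```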
|
| /linux/fs/jffs2/ |
| compr_zlib.c | 45 def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS, in alloc_workspaces() 47 if (!def_strm.workspace) in alloc_workspaces() 52 inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); in alloc_workspaces() 53 if (!inf_strm.workspace) { in alloc_workspaces() 54 vfree(def_strm.workspace); in alloc_workspaces() 64 vfree(def_strm.workspace); in free_workspaces() 65 vfree(inf_strm.workspace); in free_workspaces()
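The jffs2 hits show the contract of the kernel zlib API: unlike userspace zlib, zlib_deflateInit()/zlib_inflateInit() never allocate, so the caller must first attach a workspace sized by zlib_deflate_workspacesize() or zlib_inflate_workspacesize() to the stream. A sketch of the deflate side follows; the inflate side is symmetric, and the helper name is illustrative.

```c
/* Sketch: kernel zlib deflate with a caller-provided workspace. */
#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static int example_deflate(void *dst, unsigned int dst_len,
			   const void *src, unsigned int src_len)
{
	struct z_stream_s strm = {};
	int ret = -EIO;

	strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
							    MAX_MEM_LEVEL));
	if (!strm.workspace)
		return -ENOMEM;

	if (zlib_deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
		goto out;

	strm.next_in = src;
	strm.avail_in = src_len;
	strm.next_out = dst;
	strm.avail_out = dst_len;

	if (zlib_deflate(&strm, Z_FINISH) == Z_STREAM_END)
		ret = strm.total_out;		/* compressed size */

	zlib_deflateEnd(&strm);
out:
	vfree(strm.workspace);
	return ret;
}
```

The same split explains most of the other zlib hits in this listing (cramfs, squashfs, ppp_deflate, module decompression): only the allocator differs (vmalloc, kmalloc or kvmalloc) and, for raw deflate as in ppp_deflate.c, the window-bits argument passed to zlib_deflate_workspacesize().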
|
| /linux/fs/pstore/ |
| platform.c | 181 .workspace = compress_workspace, in pstore_compress() 588 char *unzipped, *workspace; in decompress_record() local 601 if (!zstream->workspace) { in decompress_record() 614 workspace = kvzalloc(max_uncompressed_size + record->ecc_notice_size, in decompress_record() 616 if (!workspace) in decompress_record() 621 zstream->next_out = workspace; in decompress_record() 627 kvfree(workspace); in decompress_record() 634 memcpy(workspace + unzipped_len, record->buf + record->size, in decompress_record() 638 unzipped = kvmemdup(workspace, unzipped_len + record->ecc_notice_size, in decompress_record() 640 kvfree(workspace); in decompress_record() [all …]
|
| /linux/lib/zlib_inflate/ |
| infutil.c | 20 strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); in zlib_inflate_blob() 21 if (strm->workspace == NULL) in zlib_inflate_blob() 44 kfree(strm->workspace); in zlib_inflate_blob()
|
| /linux/fs/cramfs/ |
| uncompress.c | 62 stream.workspace = vmalloc(zlib_inflate_workspacesize()); in cramfs_uncompress_init() 63 if (!stream.workspace) { in cramfs_uncompress_init() 78 vfree(stream.workspace); in cramfs_uncompress_exit()
|
| /linux/fs/squashfs/ |
| zstd_wrapper.c | 23 struct workspace { struct 31 struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL); in zstd_init() argument 53 struct workspace *wksp = strm; in zstd_free() 65 struct workspace *wksp = strm; in zstd_uncompress()
|
| zlib_wrapper.c | 29 stream->workspace = vmalloc(zlib_inflate_workspacesize()); in zlib_init() 30 if (stream->workspace == NULL) in zlib_init() 47 vfree(stream->workspace); in zlib_free()
|
| /linux/include/linux/ |
| zstd.h | 226 zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size); 330 zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size); 477 unsigned long long pledged_src_size, void *workspace, size_t workspace_size); 567 zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
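Besides the one-shot cctx/dctx constructors, the header also declares the streaming initialisers whose signatures appear above. A sketch of the decompression-stream variant, again with a caller-owned workspace; the window-size value and the helper name are made up for the example.

```c
/* Sketch: single-call use of the kernel zstd streaming decompress API. */
#include <linux/zstd.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int example_zstd_dstream(void *dst, size_t dst_len,
				const void *src, size_t src_len)
{
	size_t max_window = 1UL << 20;	/* assume frames with <= 1 MiB window */
	size_t wksp_size = zstd_dstream_workspace_bound(max_window);
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_dstream *ds;
	zstd_in_buffer in = { .src = src, .size = src_len, .pos = 0 };
	zstd_out_buffer out = { .dst = dst, .size = dst_len, .pos = 0 };
	size_t ret;

	if (!wksp)
		return -ENOMEM;

	ds = zstd_init_dstream(max_window, wksp, wksp_size);
	if (!ds) {
		kvfree(wksp);
		return -EINVAL;
	}

	ret = zstd_decompress_stream(ds, &out, &in);
	kvfree(wksp);

	if (zstd_is_error(ret) || ret != 0)
		return -EIO;	/* 0 means the frame was fully decoded */
	return out.pos;
}
```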
|
| /linux/crypto/ |
| deflate.c | 31 u8 workspace[]; member 43 ctx = kvmalloc(struct_size(ctx, workspace, size), GFP_KERNEL); in deflate_alloc_stream() 47 ctx->stream.workspace = ctx->workspace; in deflate_alloc_stream()
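crypto/deflate.c folds the zlib workspace into the tail of its own context as a flexible array member, so a single kvmalloc() covers both and the stream's workspace pointer simply aims at the tail. A sketch of that pattern with invented struct and helper names; the real driver sizes the buffer for both deflate and inflate use.

```c
/* Sketch: context and zlib workspace in one allocation via struct_size(). */
#include <linux/zlib.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/mm.h>

struct example_ctx {
	struct z_stream_s stream;
	u8 workspace[];			/* sized at allocation time */
};

static struct example_ctx *example_alloc_ctx(void)
{
	size_t wksp = zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL);
	struct example_ctx *ctx;

	/* struct_size() computes sizeof(*ctx) + wksp with overflow checking. */
	ctx = kvmalloc(struct_size(ctx, workspace, wksp), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->stream.workspace = ctx->workspace;
	return ctx;
}
```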
|
| /linux/lib/ |
| decompress_inflate.c | 82 strm->workspace = malloc(flush ? zlib_inflate_workspacesize() : in __gunzip() 89 if (strm->workspace == NULL) { in __gunzip() 186 free(strm->workspace); in __gunzip()
|
| /linux/drivers/net/ethernet/chelsio/cxgb4/ |
| cudbg_zlib.c | 45 compress_stream.workspace = pdbg_init->workspace; in cudbg_compress_buff()
|
| /linux/drivers/net/ppp/ |
| ppp_deflate.c | 66 vfree(state->strm.workspace); in z_comp_free() 107 state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8)); in z_comp_alloc() 108 if (state->strm.workspace == NULL) in z_comp_alloc() 282 vfree(state->strm.workspace); in z_decomp_free() 322 state->strm.workspace = vmalloc(zlib_inflate_workspacesize()); in z_decomp_alloc() 323 if (state->strm.workspace == NULL) in z_decomp_alloc()
|
| /linux/lib/zstd/decompress/ |
| zstd_decompress_internal.h | 87 U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32]; member 134 …U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */ member
|
| /linux/lib/crypto/ |
| sha1.c | 52 #define W(x) (workspace[(x)&15]) 77 u32 workspace[SHA1_WORKSPACE_WORDS]) 118 u32 workspace[SHA1_WORKSPACE_WORDS]; in sha1_transform() 121 sha1_block_generic(state, data, workspace); in sha1_transform() 125 memzero_explicit(workspace, sizeof(workspace)); in sha1_transform() 147 u32 workspace[SHA1_WORKSPACE_WORDS]; sha1_blocks_generic() local
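One detail worth noting in the sha1.c hits: the on-stack workspace is wiped with memzero_explicit() rather than memset() once the block has been processed, because a plain memset() of a dying stack buffer is a dead store the compiler may delete. A tiny illustration (the surrounding function is invented):

```c
#include <linux/string.h>
#include <linux/types.h>

static void example_use_scratch(void)
{
	u32 scratch[16];	/* 16 words, mirroring SHA1_WORKSPACE_WORDS */

	/* ... expand a message block into scratch and mix it into the state ... */

	/* memzero_explicit() survives dead-store elimination; memset() may not. */
	memzero_explicit(scratch, sizeof(scratch));
}
```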
|
| /linux/kernel/module/ |
| decompress.c | 103 s.workspace = kvmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); in module_gzip_decompress() 104 if (!s.workspace) in module_gzip_decompress() 141 kvfree(s.workspace); in module_gzip_decompress()
|