| /linux/lib/zstd/compress/ |
| zstd_double_fast.c |
     30  const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;    in ZSTD_fillDoubleHashTableForCDict() [local]
     37  for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {    in ZSTD_fillDoubleHashTableForCDict()
     68  const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;    in ZSTD_fillDoubleHashTableForCCtx() [local]
     75  for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {    in ZSTD_fillDoubleHashTableForCCtx()
    122  const BYTE* const iend = istart + srcSize;    in ZSTD_compressBlock_doubleFast_noDict_generic() [local]
    123  const BYTE* const ilimit = iend - HASH_READ_SIZE;    in ZSTD_compressBlock_doubleFast_noDict_generic()
    192  mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;    in ZSTD_compressBlock_doubleFast_noDict_generic()
    194  … ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);    in ZSTD_compressBlock_doubleFast_noDict_generic()
    208  mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;    in ZSTD_compressBlock_doubleFast_noDict_generic()
    252  return (size_t)(iend - anchor);    in ZSTD_compressBlock_doubleFast_noDict_generic()
    [all …]
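The matches at lines 192 and 208 show the usual shape of match-length computation in the double-fast searcher: `ZSTD_count()` compares bytes only up to `iend`, and the trailing `+ 4` / `+ 8` adds back the prefix that was already verified before the call. Below is a minimal standalone sketch of that idea; `count_upto`, `rep_match_length`, and `ALREADY_VERIFIED` are illustrative names, not zstd's API.

```c
#include <stddef.h>
#include <stdint.h>

/* Count matching bytes between ip and match, never reading at or past iend. */
static size_t count_upto(const uint8_t *ip, const uint8_t *match,
                         const uint8_t *const iend)
{
    size_t len = 0;
    while (ip + len < iend && ip[len] == match[len])
        len++;
    return len;
}

/* Repcode-style match length: the caller has already confirmed the first
 * ALREADY_VERIFIED bytes (e.g. via a 32-bit compare), so counting resumes
 * after them and the verified prefix is added back at the end.
 * `offset` must not reach before the start of the buffer. */
#define ALREADY_VERIFIED 4

static size_t rep_match_length(const uint8_t *ip, size_t offset,
                               const uint8_t *const iend)
{
    return count_upto(ip + ALREADY_VERIFIED,
                      ip + ALREADY_VERIFIED - offset,
                      iend) + ALREADY_VERIFIED;
}
```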
|
| zstd_fast.c |
     27  const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;    in ZSTD_fillHashTableForCDict() [local]
     37  for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {    in ZSTD_fillHashTableForCDict()
     64  const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;    in ZSTD_fillHashTableForCCtx() [local]
     74  for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {    in ZSTD_fillHashTableForCCtx()
    205  const BYTE* const iend = istart + srcSize;    in ZSTD_compressBlock_fast_noDict_generic() [local]
    206  const BYTE* const ilimit = iend - HASH_READ_SIZE;    in ZSTD_compressBlock_fast_noDict_generic()
    374  return (size_t)(iend - anchor);    in ZSTD_compressBlock_fast_noDict_generic()
    395  mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);    in ZSTD_compressBlock_fast_noDict_generic()
    397  ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);    in ZSTD_compressBlock_fast_noDict_generic()
    412  size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;    in ZSTD_compressBlock_fast_noDict_generic()
    [all …]
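In both zstd_fast.c and zstd_double_fast.c the same bounding pattern appears: `iend = istart + srcSize` is one-past-the-end of the block, `ilimit = iend - HASH_READ_SIZE` keeps the widest hash read inside the block, and `iend - anchor` is the residual literal count returned at the end. A minimal sketch of that loop bound follows; `HASH_READ_WIDTH` and `scan_positions` are illustrative, not the zstd implementation.

```c
#include <stddef.h>
#include <stdint.h>

#define HASH_READ_WIDTH 8   /* illustrative: widest read performed when hashing */

/* Walk a block the way the fast match finders bound their main loop: stop
 * early enough that a HASH_READ_WIDTH-byte read at ip never crosses iend,
 * and report the unscanned tail the way `iend - anchor` would. */
static size_t scan_positions(const uint8_t *istart, size_t srcSize,
                             size_t *tailLiterals)
{
    size_t positions = 0;

    if (srcSize < HASH_READ_WIDTH) {     /* too small to hash anything safely */
        *tailLiterals = srcSize;
        return 0;
    }
    {
        const uint8_t *const iend   = istart + srcSize;
        const uint8_t *const ilimit = iend - HASH_READ_WIDTH;
        const uint8_t *ip = istart;

        while (ip <= ilimit) {           /* reads [ip, ip+8) stay within [istart, iend) */
            positions++;
            ip++;
        }
        *tailLiterals = (size_t)(iend - ip);   /* flushed as raw literals at block end */
    }
    return positions;
}
```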
|
| zstd_ldm.c |
    255  const BYTE* const iend = (const BYTE*)end;    in ZSTD_ldm_fillFastTables() [local]
    260  ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);    in ZSTD_ldm_fillFastTables()
    265  ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);    in ZSTD_ldm_fillFastTables()
    288  const BYTE* iend, ldmParams_t const* params)    in ZSTD_ldm_fillHashTable() [argument]
    302  while (ip < iend) {    in ZSTD_ldm_fillHashTable()
    307  hashed = ZSTD_ldm_gear_feed(&hashState, ip, (size_t)(iend - ip), splits, &numSplits);    in ZSTD_ldm_fillHashTable()
    362  BYTE const* const iend = istart + srcSize;    in ZSTD_ldm_generateSequences_internal() [local]
    363  BYTE const* const ilimit = iend - HASH_READ_SIZE;    in ZSTD_ldm_generateSequences_internal()
    375  return iend - anchor;    in ZSTD_ldm_generateSequences_internal()
    436  cur->offset < dictLimit ? dictEnd : iend;    in ZSTD_ldm_generateSequences_internal()
    [all …]
|
| zstd_lazy.c |
     31  const BYTE* ip, const BYTE* iend,    in ZSTD_updateDUBT() [argument]
     49  assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */    in ZSTD_updateDUBT()
     50  (void)iend;    in ZSTD_updateDUBT()
     89  const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;    in ZSTD_insertDUBT1() [local]
    105  assert(ip < iend); /* condition for ZSTD_count */    in ZSTD_insertDUBT1()
    124  matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);    in ZSTD_insertDUBT1()
    127  …matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);    in ZSTD_insertDUBT1()
    135  if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */    in ZSTD_insertDUBT1()
    167  const BYTE* const ip, const BYTE* const iend,    in ZSTD_DUBT_findBetterDictMatch() [argument]
    204  …matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);    in ZSTD_DUBT_findBetterDictMatch()
    [all …]
|
| zstd_opt.c |
    445  const BYTE* const ip, const BYTE* const iend,    in ZSTD_insertBt1() [argument]
    486  assert(ip <= iend-8); /* required for h calculation */    in ZSTD_insertBt1()
    519  matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);    in ZSTD_insertBt1()
    522  …matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);    in ZSTD_insertBt1()
    533  if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */    in ZSTD_insertBt1()
    565  const BYTE* const ip, const BYTE* const iend,    in ZSTD_updateTree_internal() [argument]
    575  … U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);    in ZSTD_updateTree_internal()
    580  assert((size_t)(iend - base) <= (size_t)(U32)(-1));    in ZSTD_updateTree_internal()
    584  void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend) {    in ZSTD_updateTree() [argument]
    585  ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);    in ZSTD_updateTree()
    [all …]
|
| hist.c |
     84  const BYTE* const iend = ip+sourceSize;    in HIST_count_parallel_wksp() [local]
    103  while (ip < iend-15) {    in HIST_count_parallel_wksp()
    129  while (ip<iend) Counting1[*ip++]++;    in HIST_count_parallel_wksp()
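The two loops at lines 103 and 129 show the common structure of an unrolled hot loop guarded some distance before `iend`, followed by a scalar tail that runs exactly to `iend`. HIST_count_parallel_wksp processes 16 bytes per iteration into several counting tables; the sketch below reduces that to 4 bytes per iteration and 4 tables, purely for illustration (`hist_count_sketch` is not the library function).

```c
#include <stddef.h>
#include <stdint.h>

/* Histogram with several counting tables to reduce store-to-load dependencies:
 * the unrolled body only runs while at least 4 bytes remain before iend,
 * and a byte-at-a-time tail finishes the rest. */
static void hist_count_sketch(unsigned count[256],
                              const uint8_t *src, size_t srcSize)
{
    unsigned c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};
    const uint8_t *ip = src;
    const uint8_t *const iend = src + srcSize;

    while ((size_t)(iend - ip) >= 4) {   /* 4 bytes per iteration */
        c1[ip[0]]++;
        c2[ip[1]]++;
        c3[ip[2]]++;
        c4[ip[3]]++;
        ip += 4;
    }
    while (ip < iend)                    /* scalar tail */
        c1[*ip++]++;

    for (int s = 0; s < 256; s++)
        count[s] = c1[s] + c2[s] + c3[s] + c4[s];
}
```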
|
| zstd_compress_superblock.c |
    499  BYTE const* const iend = ip + srcSize;    in ZSTD_compressSubBlock_multi() [local]
    566  assert(ip + decompressedSize <= iend);    in ZSTD_compressSubBlock_multi()
    610  assert(ip + decompressedSize <= iend);    in ZSTD_compressSubBlock_multi()
    641  if (ip < iend) {    in ZSTD_compressSubBlock_multi()
    643  size_t const rSize = (size_t)((iend - ip));    in ZSTD_compressSubBlock_multi()
|
| zstd_ldm.h |
     26  const BYTE* iend, ldmParams_t const* params);
|
| zstd_compress.c |
   4444  void const* iend)    in ZSTD_overflowCorrectIfNeeded() [argument]
   4448  … (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {    in ZSTD_overflowCorrectIfNeeded()
   4825  const BYTE* const iend = ip + srcSize;    in ZSTD_loadDictionaryContent() [local]
   4853  ip = iend - maxDictSize;    in ZSTD_loadDictionaryContent()
   4871  ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);    in ZSTD_loadDictionaryContent()
   4872  ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);    in ZSTD_loadDictionaryContent()
   4879  ip = iend - maxDictSize;    in ZSTD_loadDictionaryContent()
   4886  ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);    in ZSTD_loadDictionaryContent()
   4891  ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);    in ZSTD_loadDictionaryContent()
   4896  ZSTD_fillHashTable(ms, iend, dtlm, tfp);    in ZSTD_loadDictionaryContent()
    [all …]
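The repeated `ip = iend - maxDictSize` lines in ZSTD_loadDictionaryContent() suggest the dictionary-trimming step: when the dictionary is larger than the window can index, only its most recent bytes are loaded. A tiny sketch of that pointer adjustment, under that reading (`trim_dict` is an illustrative helper, not part of zstd):

```c
#include <stddef.h>
#include <stdint.h>

/* Keep only the last maxDictSize bytes of a dictionary [*ipPtr, iend):
 * move the start pointer forward so that iend - *ipPtr never exceeds
 * what the compression window can reference. */
static void trim_dict(const uint8_t **ipPtr, const uint8_t *const iend,
                      size_t maxDictSize)
{
    size_t const dictSize = (size_t)(iend - *ipPtr);
    if (dictSize > maxDictSize)
        *ipPtr = iend - maxDictSize;   /* keep only the most recent bytes */
}
```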
|
| zstd_opt.h |
     21  void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend);
|
| fse_compress.c |
    557  const BYTE* const iend = istart + srcSize;    in FSE_compress_usingCTable_generic() [local]
    558  const BYTE* ip=iend;    in FSE_compress_usingCTable_generic()
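Here `ip` starts at `iend` rather than `istart`: FSE encodes symbols from the end of the input toward its start. A trivial sketch of a backward walk bounded by `istart` (illustrative only; it computes a checksum rather than encoding anything):

```c
#include <stdint.h>

/* Walk [istart, iend) from the back, stepping the pointer down before each
 * read, the same direction the FSE encoder consumes its input. */
static unsigned long checksum_backward(const uint8_t *const istart,
                                       const uint8_t *const iend)
{
    const uint8_t *ip = iend;
    unsigned long acc = 0;
    while (ip > istart) {
        ip--;
        acc = acc * 31 + *ip;
    }
    return acc;
}
```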
|
| zstd_compress_internal.h |
    698  ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)    in ZSTD_safecopyLiterals() [argument]
    700  assert(iend > ilimit_w);    in ZSTD_safecopyLiterals()
    706  while (ip < iend) *op++ = *ip++;    in ZSTD_safecopyLiterals()
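ZSTD_safecopyLiterals() copies fast in wide chunks while the source stays below a wildcopy limit placed a margin before `iend`, then falls back to the byte loop at line 706 so nothing is ever read past `iend`. A hedged sketch of that split; the 16-byte chunk size is an illustrative choice, and it assumes the caller places `ilimit_w` at least 16 bytes before `iend`.

```c
#include <stdint.h>
#include <string.h>

/* Chunked copy while ip < ilimit_w (reads end before ilimit_w + 16 <= iend,
 * by assumption), then an exact byte-by-byte tail up to iend. */
static void safecopy_sketch(uint8_t *op, const uint8_t *ip,
                            const uint8_t *const iend,
                            const uint8_t *const ilimit_w)
{
    while (ip < ilimit_w) {
        memcpy(op, ip, 16);   /* fast path: fixed-size chunk */
        op += 16;
        ip += 16;
    }
    while (ip < iend)         /* exact tail, one byte at a time */
        *op++ = *ip++;
}
```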
|
| huf_compress.c |
   1172  const BYTE* const iend = ip + srcSize;    in HUF_compress4X_usingCTable_internal() [local]
   1206  assert(ip <= iend);    in HUF_compress4X_usingCTable_internal()
   1207  …, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags)…    in HUF_compress4X_usingCTable_internal()
|
| /linux/lib/zstd/common/ |
| entropy_common.c |
     47  const BYTE* const iend = istart + hbSize;    in FSE_readNCount_body() [local]
     92  if (LIKELY(ip <= iend-7)) {    in FSE_readNCount_body()
     95  bitCount -= (int)(8 * (iend - 7 - ip));    in FSE_readNCount_body()
     97  ip = iend - 4;    in FSE_readNCount_body()
    121  if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {    in FSE_readNCount_body()
    126  bitCount -= (int)(8 * (iend - 4 - ip));    in FSE_readNCount_body()
    128  ip = iend - 4;    in FSE_readNCount_body()
    170  if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {    in FSE_readNCount_body()
    174  bitCount -= (int)(8 * (iend - 4 - ip));    in FSE_readNCount_body()
    176  ip = iend - 4;    in FSE_readNCount_body()
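The `ip = iend - 4` / `bitCount -= 8 * (iend - 4 - ip)` pairs are the tail handling of a little-endian bit reader: near the end of the header buffer the read pointer is pinned to the last 4 readable bytes and the bit counter absorbs the bytes it could not skip, so no load ever crosses `iend`. A hedged sketch of that idea, not FSE's actual reader, assuming the buffer holds at least 4 bytes:

```c
#include <stddef.h>
#include <stdint.h>

/* Advance a little-endian 32-bit container without reading past iend.
 * `bitCount` is the number of bits already consumed from the container
 * currently loaded at *ipPtr.  Assumes iend - *ipPtr >= 4 on entry. */
static uint32_t refill_clamped(const uint8_t **ipPtr, const uint8_t *const iend,
                               int *bitCount)
{
    const uint8_t *ip = *ipPtr;

    if ((size_t)(iend - ip) >= (size_t)(*bitCount >> 3) + 4) {
        ip += *bitCount >> 3;                       /* normal case: skip whole bytes */
        *bitCount &= 7;
    } else {
        int const advanced = (int)(iend - 4 - ip);  /* clamp to the last 4 bytes */
        ip = iend - 4;
        *bitCount -= 8 * advanced;                  /* bits not skipped stay counted */
    }
    *ipPtr = ip;
    return (uint32_t)ip[0]         | ((uint32_t)ip[1] << 8)
         | ((uint32_t)ip[2] << 16) | ((uint32_t)ip[3] << 24);
}
```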
|
| /linux/fs/netfs/ |
| misc.c |
    242  unsigned int iend = offset + length;    in netfs_invalidate_folio() [local]
    246  if (iend <= fstart)    in netfs_invalidate_folio()
    254  if (iend >= fend)    in netfs_invalidate_folio()
    257  finfo->dirty_len = fend - iend;    in netfs_invalidate_folio()
    265  if (iend >= fend) {    in netfs_invalidate_folio()
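In netfs, `iend` is an integer rather than a pointer: the end offset of the invalidated byte range inside a folio, compared against a dirty region [fstart, fend). A small sketch of the overlap arithmetic those comparisons suggest; `struct dirty_region` and `trim_dirty` are illustrative names, not the kernel's netfs structures.

```c
/* Illustrative stand-in for per-folio dirty-range bookkeeping. */
struct dirty_region {
    unsigned int start;   /* first dirty byte within the folio */
    unsigned int len;     /* number of dirty bytes */
};

/* Trim the dirty region when [offset, offset + length) is invalidated. */
static void trim_dirty(struct dirty_region *d,
                       unsigned int offset, unsigned int length)
{
    unsigned int iend   = offset + length;       /* end of invalidated range */
    unsigned int fstart = d->start;              /* dirty region bounds */
    unsigned int fend   = d->start + d->len;

    if (iend <= fstart || offset >= fend)
        return;                                  /* no overlap: nothing to do */

    if (offset <= fstart && iend >= fend) {      /* dirty region fully covered */
        d->len = 0;
        return;
    }
    if (offset <= fstart) {                      /* front of dirty region cut off */
        d->start = iend;
        d->len   = fend - iend;
    } else {                                     /* tail (or middle) cut, kept simple */
        d->len = offset - fstart;
    }
}
```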
|
| /linux/lib/zstd/decompress/ |
| huf_decompress.c |
    177  BYTE const* iend[4];    [member]
    225  args->iend[0] = istart + 6; /* jumpTable */    in HUF_DecompressFastArgs_init()
    226  args->iend[1] = args->iend[0] + length1;    in HUF_DecompressFastArgs_init()
    227  args->iend[2] = args->iend[1] + length2;    in HUF_DecompressFastArgs_init()
    228  args->iend[3] = args->iend[2] + length3;    in HUF_DecompressFastArgs_init()
    238  args->ip[0] = args->iend[1] - sizeof(U64);    in HUF_DecompressFastArgs_init()
    239  args->ip[1] = args->iend[2] - sizeof(U64);    in HUF_DecompressFastArgs_init()
    240  args->ip[2] = args->iend[3] - sizeof(U64);    in HUF_DecompressFastArgs_init()
    288  if (args->ip[stream] < args->iend[stream] - 8)    in HUF_initRemainingDStream()
    867  assert(ilowest + 6 == args.iend[0]);    in HUF_decompress4X1_usingDTable_internal_fast()
    [all …]
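The `iend[0..3]` assignments show how a four-stream Huffman payload is carved up: the first 6 bytes are a jump table holding the sizes of the first three streams as little-endian 16-bit values, the successive `iend[]` entries mark the stream boundaries, and each stream's backward bit reader starts 8 bytes before its end. A sketch of just the boundary computation, with reduced error handling (`split_streams` is an illustrative helper, not HUF's API):

```c
#include <stddef.h>
#include <stdint.h>

/* Compute per-stream end pointers for a 4-stream payload.
 * Returns 1 on success, 0 if the declared sizes do not fit in srcSize. */
static int split_streams(const uint8_t *istart, size_t srcSize,
                         const uint8_t *iend_out[4])
{
    size_t length1, length2, length3;

    if (srcSize < 10)                   /* 6-byte jump table + something to decode */
        return 0;

    length1 = (size_t)istart[0] | ((size_t)istart[1] << 8);
    length2 = (size_t)istart[2] | ((size_t)istart[3] << 8);
    length3 = (size_t)istart[4] | ((size_t)istart[5] << 8);

    if (6 + length1 + length2 + length3 > srcSize)
        return 0;                       /* declared sizes overflow the input */

    iend_out[0] = istart + 6;                 /* end of the jump table */
    iend_out[1] = iend_out[0] + length1;      /* end of stream 1 */
    iend_out[2] = iend_out[1] + length2;      /* end of stream 2 */
    iend_out[3] = iend_out[2] + length3;      /* end of stream 3 */
    /* stream 4 runs from iend_out[3] to istart + srcSize */
    return 1;
}
```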
|
| zstd_decompress_block.c |
    700  const BYTE* const iend = istart + srcSize;    in ZSTD_decodeSeqHeaders() [local]
    712  RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");    in ZSTD_decodeSeqHeaders()
    716  RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");    in ZSTD_decodeSeqHeaders()
    724  RETURN_ERROR_IF(ip != iend, corruption_detected,    in ZSTD_decodeSeqHeaders()
    730  …RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encod…    in ZSTD_decodeSeqHeaders()
    740  ip, iend-ip,    in ZSTD_decodeSeqHeaders()
    752  ip, iend-ip,    in ZSTD_decodeSeqHeaders()
    764  ip, iend-ip,    in ZSTD_decodeSeqHeaders()
   1410  const BYTE* const iend = ip + seqSize;    in ZSTD_decompressSequences_bodySplitLitBuffer() [local]
   1427  ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),    in ZSTD_decompressSequences_bodySplitLitBuffer()
    [all …]
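Every read in ZSTD_decodeSeqHeaders() is preceded by an explicit comparison against `iend`, with RETURN_ERROR_IF reporting a shortfall instead of reading past the buffer. A reduced sketch of that style follows; the two-byte escape encoding shown is illustrative, not the exact zstd sequence-count format.

```c
#include <stddef.h>
#include <stdint.h>

/* Parse a small variable-length count from [istart, istart + srcSize),
 * checking against iend before every read.  Returns bytes consumed,
 * or -1 if the input is too short (the RETURN_ERROR_IF analogue). */
static int parse_header_sketch(const uint8_t *istart, size_t srcSize,
                               unsigned *nbSeqOut)
{
    const uint8_t *ip = istart;
    const uint8_t *const iend = istart + srcSize;
    unsigned nbSeq;

    if (ip >= iend)
        return -1;                       /* nothing to read */
    nbSeq = *ip++;

    if (nbSeq > 0x7F) {                  /* illustrative 2-byte escape form */
        if (ip + 1 > iend)
            return -1;                   /* escape byte missing */
        nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
    }
    *nbSeqOut = nbSeq;
    return (int)(ip - istart);           /* bytes consumed */
}
```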
|
| zstd_decompress.c |
   2013  const char* const iend = input->size != 0 ? src + input->size : src;    in ZSTD_decompressStream() [local]
   2048  DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));    in ZSTD_decompressStream()
   2058  size_t const remainingInput = (size_t)(iend-ip);    in ZSTD_decompressStream()
   2059  assert(iend >= ip);    in ZSTD_decompressStream()
   2082  …size_t const cSize = ZSTD_findFrameCompressedSize_advanced(istart, (size_t)(iend-istart), zds->for…    in ZSTD_decompressStream()
   2083  if (cSize <= (size_t)(iend-istart)) {    in ZSTD_decompressStream()
   2168  … size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));    in ZSTD_decompressStream()
   2175  if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */    in ZSTD_decompressStream()
   2182  if (ip==iend) { someMoreWork = 0; break; } /* no more input */    in ZSTD_decompressStream()
   2192  … assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip)));    in ZSTD_decompressStream()
    [all …]
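The comparisons of `iend - ip` against `neededInSize` around lines 2168 to 2182 encode the streaming dispatch: decode straight from the caller's buffer when the whole next chunk is already present, otherwise stage the partial chunk and wait for more input. A hedged sketch of that dispatch under simplified assumptions; `feed_block` and the `STEP_*` values are illustrative names, not zstd's streaming API.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

enum step { STEP_DECODED, STEP_NEED_MORE };

/* ip/iend walk the caller's input; `staged`/`stagedSize` accumulate a partial
 * block across calls when the input runs dry mid-block.  `staged` is assumed
 * to be at least neededInSize bytes. */
static enum step feed_block(const uint8_t **ipPtr, const uint8_t *const iend,
                            uint8_t *staged, size_t *stagedSize,
                            size_t neededInSize)
{
    const uint8_t *ip = *ipPtr;
    size_t const have = (size_t)(iend - ip);

    if (*stagedSize == 0 && have >= neededInSize) {
        /* enough contiguous input: a real decoder would decode directly from ip */
        *ipPtr = ip + neededInSize;
        return STEP_DECODED;
    }
    {   /* not enough: stage what we have and ask for more */
        size_t const want = neededInSize - *stagedSize;
        size_t const take = have < want ? have : want;
        memcpy(staged + *stagedSize, ip, take);
        *stagedSize += take;
        *ipPtr = ip + take;
        return (*stagedSize == neededInSize) ? STEP_DECODED : STEP_NEED_MORE;
    }
}
```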
|
| /linux/drivers/scsi/qla2xxx/ |
| qla_sup.c |
   3111  uint32_t istart, iend, iter, vend;    in qla2x00_get_fcode_version() [local]
   3120  iend = istart + 0x100;    in qla2x00_get_fcode_version()
   3125  while ((iter < iend) && !do_next) {    in qla2x00_get_fcode_version()
|