/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_compress_superblock.h"

#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
#include "hist.h"                     /* HIST_countFast_wksp */
#include "zstd_compress_internal.h"   /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"

/** ZSTD_compressSubBlock_literal() :
 *  Compresses literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *      If it is set_basic, all sub-blocks' literals sections will be Raw_Literals_Block.
 *      If it is set_rle, all sub-blocks' literals sections will be RLE_Literals_Block.
 *      If it is set_compressed, the first sub-block's literals section will be Compressed_Literals_Block
 *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if it is unable to compress.
 *            Or error code */
static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                    const BYTE* literals, size_t litSize,
                                    void* dst, size_t dstSize,
                                    const int bmi2, int writeEntropy, int* entropyWritten)
{
    size_t const header = writeEntropy ? 200 : 0;
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    U32 const singleStream = lhSize == 3;
    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    (void)bmi2; /* TODO bmi2... */
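    /* Note: lhSize is chosen from litSize alone. When writeEntropy is set, a 200-byte margin
     * bounds the Huffman table description so that cLitSize should still fit the selected
     * header field widths; if the guess turns out wrong, the checks after compression below
     * fall back to raw literals (see the header-size discussion in the function comment). */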

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    /* TODO bmi2 */
    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || ERR_isError(cSize)) {
            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
            return 0;
        }
        /* If we expand and we aren't writing a header then emit uncompressed */
        if (!writeEntropy && cLitSize >= litSize) {
            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        /* If we are writing headers then allow expansion that doesn't change our header size. */
        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
            assert(cLitSize > litSize);
            DEBUGLOG(5, "Literals expanded beyond allowed header size");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default: /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    *entropyWritten = 1;
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
    return op-ostart;
}

static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
    const seqDef* const sstart = sequences;
    const seqDef* const send = sequences + nbSeq;
    const seqDef* sp = sstart;
    size_t matchLengthSum = 0;
    size_t litLengthSum = 0;
    (void)(litLengthSum); /* suppress unused variable warning on some environments */
    while (send-sp > 0) {
        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
        litLengthSum += seqLen.litLength;
        matchLengthSum += seqLen.matchLength;
        sp++;
    }
    assert(litLengthSum <= litSize);
    if (!lastSequence) {
        assert(litLengthSum == litSize);
    }
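    /* The decompressed size is the sum of all match lengths plus the literals assigned to this
     * sub-block. For the last sub-block, litSize also covers the trailing literals that follow
     * the final sequence, which is why litLengthSum may be strictly smaller than litSize above. */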
    return matchLengthSum + litSize;
}

/** ZSTD_compressSubBlock_sequences() :
 *  Compresses sequences section for a sub-block.
 *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
 *  symbol compression modes for the super-block.
 *  The first successfully compressed block will have these in its header.
 *  We set entropyWritten=1 when we succeed in compressing the sequences.
 *  The following sub-blocks will always have repeat mode.
 *  @return : compressed size of sequences section of a sub-block
 *            Or 0 if it is unable to compress
 *            Or error code. */
static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
                                              const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                              const seqDef* sequences, size_t nbSeq,
                                              const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                              const ZSTD_CCtx_params* cctxParams,
                                              void* dst, size_t dstCapacity,
                                              const int bmi2, int writeEntropy, int* entropyWritten)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    BYTE* seqHead;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);

    *entropyWritten = 0;
    /* Sequences Header */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "");
    if (nbSeq < 0x7F)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        return op - ostart;
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));

    if (writeEntropy) {
        const U32 LLtype = fseMetadata->llType;
        const U32 Offtype = fseMetadata->ofType;
        const U32 MLtype = fseMetadata->mlType;
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
        op += fseMetadata->fseTablesSize;
    } else {
        const U32 repeat = set_repeat;
        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
    }

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, oend - op,
                                        fseTables->matchlengthCTable, mlCode,
                                        fseTables->offcodeCTable, ofCode,
                                        fseTables->litlengthCTable, llCode,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize + bitstreamSize == 3 */
            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
#endif
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
    }

    /* zstd versions <= 1.4.0 mistakenly report error when
     * sequences section body size is less than 3 bytes.
     * Fixed by https://github.com/facebook/zstd/pull/1664.
     * This can happen when the previous sequences section block is compressed
     * with rle mode and the current block's sequences section is compressed
     * with repeat mode where sequences section body size can be 1 byte.
     */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (op-seqHead < 4) {
        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
                    "an uncompressed block when sequences are < 4 bytes");
        return 0;
    }
#endif

    *entropyWritten = 1;
    return op - ostart;
}

/** ZSTD_compressSubBlock() :
 *  Compresses a single sub-block.
 *  @return : compressed size of the sub-block
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                    const seqDef* sequences, size_t nbSeq,
                                    const BYTE* literals, size_t litSize,
                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                    const ZSTD_CCtx_params* cctxParams,
                                    void* dst, size_t dstCapacity,
                                    const int bmi2,
                                    int writeLitEntropy, int writeSeqEntropy,
                                    int* litEntropyWritten, int* seqEntropyWritten,
                                    U32 lastBlock)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart + ZSTD_blockHeaderSize;
    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
                                                        &entropyMetadata->hufMetadata, literals, litSize,
                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
        if (cLitSize == 0) return 0;
        op += cLitSize;
    }
    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
                                                          &entropyMetadata->fseMetadata,
                                                          sequences, nbSeq,
                                                          llCode, mlCode, ofCode,
                                                          cctxParams,
                                                          op, oend-op,
                                                          bmi2, writeSeqEntropy, seqEntropyWritten);
        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
        if (cSeqSize == 0) return 0;
        op += cSeqSize;
    }
    /* Write block header */
    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
        MEM_writeLE24(ostart, cBlockHeader24);
    }
    return op-ostart;
}

static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
                                                const ZSTD_hufCTables_t* huf,
                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                                void* workspace, size_t wkspSize,
                                                int writeEntropy)
{
    unsigned* const countWksp = (unsigned*)workspace;
    unsigned maxSymbolValue = 255;
    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
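    /* Note: the real literals section header written in ZSTD_compressSubBlock_literal() can be
     * 3 to 5 bytes; the estimate assumes the common 3-byte case, so it can undershoot slightly
     * for large sub-blocks. */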

    if (hufMetadata->hType == set_basic) return litSize;
    else if (hufMetadata->hType == set_rle) return 1;
    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
        if (ZSTD_isError(largest)) return litSize;
        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
            return cLitSizeEstimate + literalSectionHeaderSize;
    }   }
    assert(0); /* impossible */
    return 0;
}

static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
                        const BYTE* codeTable, unsigned maxCode,
                        size_t nbSeq, const FSE_CTable* fseCTable,
                        const U8* additionalBits,
                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                        void* workspace, size_t wkspSize)
{
    unsigned* const countWksp = (unsigned*)workspace;
    const BYTE* ctp = codeTable;
    const BYTE* const ctStart = ctp;
    const BYTE* const ctEnd = ctStart + nbSeq;
    size_t cSymbolTypeSizeEstimateInBits = 0;
    unsigned max = maxCode;

    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
    if (type == set_basic) {
        /* We selected this encoding type, so it must be valid. */
        assert(max <= defaultMax);
        cSymbolTypeSizeEstimateInBits = max <= defaultMax
                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
                : ERROR(GENERIC);
    } else if (type == set_rle) {
        cSymbolTypeSizeEstimateInBits = 0;
    } else if (type == set_compressed || type == set_repeat) {
        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
    }
    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
    while (ctp < ctEnd) {
        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
        ctp++;
    }
    return cSymbolTypeSizeEstimateInBits / 8;
}

static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
                                                  const BYTE* llCodeTable,
                                                  const BYTE* mlCodeTable,
                                                  size_t nbSeq,
                                                  const ZSTD_fseCTables_t* fseTables,
                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                                  void* workspace, size_t wkspSize,
                                                  int writeEntropy)
{
    size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
    size_t cSeqSizeEstimate = 0;
    if (nbSeq == 0) return sequencesSectionHeaderSize;
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
                                         nbSeq, fseTables->offcodeCTable, NULL,
                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                         workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
                                         nbSeq, fseTables->litlengthCTable, LL_bits,
                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                         workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
                                         workspace, wkspSize);
    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
    return cSeqSizeEstimate + sequencesSectionHeaderSize;
}
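
/* ZSTD_estimateSubBlockSize() below sums the literal and sequence estimates plus the 3-byte
 * block header; ZSTD_compressSubBlock_multi() compares this estimate against targetCBlockSize
 * to decide where to cut sub-blocks. */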

static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
                                        const BYTE* ofCodeTable,
                                        const BYTE* llCodeTable,
                                        const BYTE* mlCodeTable,
                                        size_t nbSeq,
                                        const ZSTD_entropyCTables_t* entropy,
                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                        void* workspace, size_t wkspSize,
                                        int writeLitEntropy, int writeSeqEntropy) {
    size_t cSizeEstimate = 0;
    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
                                                       &entropy->huf, &entropyMetadata->hufMetadata,
                                                       workspace, wkspSize, writeLitEntropy);
    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
                                                         workspace, wkspSize, writeSeqEntropy);
    return cSizeEstimate + ZSTD_blockHeaderSize;
}

static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
{
    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
        return 1;
    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
        return 1;
    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
        return 1;
    return 0;
}

/** ZSTD_compressSubBlock_multi() :
 *  Breaks super-block into multiple sub-blocks and compresses them.
 *  Entropy will be written to the first block.
 *  The following blocks will use repeat mode to compress.
 *  All sub-blocks are compressed blocks (no raw or rle blocks).
 *  @return : compressed size of the super block (which is multiple ZSTD blocks)
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
                            const ZSTD_compressedBlockState_t* prevCBlock,
                            ZSTD_compressedBlockState_t* nextCBlock,
                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                            const ZSTD_CCtx_params* cctxParams,
                                  void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            const int bmi2, U32 lastBlock,
                            void* workspace, size_t wkspSize)
{
    const seqDef* const sstart = seqStorePtr->sequencesStart;
    const seqDef* const send = seqStorePtr->sequences;
    const seqDef* sp = sstart;
    const BYTE* const lstart = seqStorePtr->litStart;
    const BYTE* const lend = seqStorePtr->lit;
    const BYTE* lp = lstart;
    BYTE const* ip = (BYTE const*)src;
    BYTE const* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    const BYTE* llCodePtr = seqStorePtr->llCode;
    const BYTE* mlCodePtr = seqStorePtr->mlCode;
    const BYTE* ofCodePtr = seqStorePtr->ofCode;
    size_t targetCBlockSize = cctxParams->targetCBlockSize;
    size_t litSize, seqCount;
    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
    int writeSeqEntropy = 1;
    int lastSequence = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
                (unsigned)(lend-lp), (unsigned)(send-sstart));

    litSize = 0;
    seqCount = 0;
    do {
        size_t cBlockSizeEstimate = 0;
        if (sstart == send) {
            lastSequence = 1;
        } else {
            const seqDef* const sequence = sp + seqCount;
            lastSequence = sequence == send - 1;
            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
            seqCount++;
        }
        if (lastSequence) {
            assert(lp <= lend);
            assert(litSize <= (size_t)(lend - lp));
            litSize = (size_t)(lend - lp);
        }
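        /* Grow the current sub-block one sequence at a time; once the estimated compressed size
         * exceeds targetCBlockSize (or the final sequence is reached), try to emit it below. */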
        /* I think there is an optimization opportunity here.
         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
         * since it recalculates estimate from scratch.
         * For example, it would recount literal distribution and symbol codes every time.
         */
        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
                                                       &nextCBlock->entropy, entropyMetadata,
                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
            int litEntropyWritten = 0;
            int seqEntropyWritten = 0;
            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                                       sp, seqCount,
                                                       lp, litSize,
                                                       llCodePtr, mlCodePtr, ofCodePtr,
                                                       cctxParams,
                                                       op, oend-op,
                                                       bmi2, writeLitEntropy, writeSeqEntropy,
                                                       &litEntropyWritten, &seqEntropyWritten,
                                                       lastBlock && lastSequence);
            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
            if (cSize > 0 && cSize < decompressedSize) {
                DEBUGLOG(5, "Committed the sub-block");
                assert(ip + decompressedSize <= iend);
                ip += decompressedSize;
                sp += seqCount;
                lp += litSize;
                op += cSize;
                llCodePtr += seqCount;
                mlCodePtr += seqCount;
                ofCodePtr += seqCount;
                litSize = 0;
                seqCount = 0;
                /* Entropy only needs to be written once */
                if (litEntropyWritten) {
                    writeLitEntropy = 0;
                }
                if (seqEntropyWritten) {
                    writeSeqEntropy = 0;
                }
            }
        }
    } while (!lastSequence);
    if (writeLitEntropy) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
    }
    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
        /* If we haven't written our entropy tables, then we've violated our contract and
         * must emit an uncompressed block.
         */
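        /* writeSeqEntropy is still set, so no sub-block carrying the promised FSE tables was ever
         * committed; returning 0 reports "unable to compress" per the @return contract above. */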
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
        return 0;
    }
    if (ip < iend) {
        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
        assert(cSize != 0);
        op += cSize;
        /* We have to regenerate the repcodes because we've skipped some sequences */
        if (sp < send) {
            seqDef const* seq;
            repcodes_t rep;
            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
            for (seq = sstart; seq < sp; ++seq) {
                ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
            }
            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
        }
    }
    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
    return op-ostart;
}

size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               void const* src, size_t srcSize,
                               unsigned lastBlock) {
    ZSTD_entropyCTablesMetadata_t entropyMetadata;

    FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
          &zc->blockState.prevCBlock->entropy,
          &zc->blockState.nextCBlock->entropy,
          &zc->appliedParams,
          &entropyMetadata,
          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");

    return ZSTD_compressSubBlock_multi(&zc->seqStore,
            zc->blockState.prevCBlock,
            zc->blockState.nextCBlock,
            &entropyMetadata,
            &zc->appliedParams,
            dst, dstCapacity,
            src, srcSize,
            zc->bmi2, lastBlock,
            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
}
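
/* Usage note (illustrative, not part of this translation unit): super-block splitting is driven
 * by the targetCBlockSize compression parameter read above from cctxParams. A minimal sketch,
 * assuming the public zstd API from zstd.h, might look like:
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     size_t cSize;
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 2048);   // aim for ~2 KB compressed blocks
 *     cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 *     if (ZSTD_isError(cSize)) { ... handle error ... }
 *     ZSTD_freeCCtx(cctx);
 */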