Diffstat (limited to 'lib/zstd/compress/zstd_compress.c')
-rw-r--r--  lib/zstd/compress/zstd_compress.c  3293
1 file changed, 2399 insertions, 894 deletions
| diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c index 16bb995bc6c4..c41a747413e0 100644 --- a/lib/zstd/compress/zstd_compress.c +++ b/lib/zstd/compress/zstd_compress.c @@ -1,5 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause  /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates.   * All rights reserved.   *   * This source code is licensed under both the BSD-style license (found in the @@ -11,12 +12,13 @@  /*-*************************************  *  Dependencies  ***************************************/ +#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */  #include "../common/zstd_deps.h"  /* INT_MAX, ZSTD_memset, ZSTD_memcpy */  #include "../common/mem.h" +#include "../common/error_private.h"  #include "hist.h"           /* HIST_countFast_wksp */  #define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */  #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY  #include "../common/huf.h"  #include "zstd_compress_internal.h"  #include "zstd_compress_sequences.h" @@ -27,6 +29,7 @@  #include "zstd_opt.h"  #include "zstd_ldm.h"  #include "zstd_compress_superblock.h" +#include  "../common/bits.h"      /* ZSTD_highbit32, ZSTD_rotateRight_U64 */  /* ***************************************************************  *  Tuning parameters @@ -44,7 +47,7 @@   * in log format, aka 17 => 1 << 17 == 128Ki positions.   * This structure is only used in zstd_opt.   * Since allocation is centralized for all strategies, it has to be known here. - * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3, + * The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3,   * so that zstd_opt.c doesn't need to know about this constant.   */  #ifndef ZSTD_HASHLOG3_MAX @@ -55,14 +58,17 @@  *  Helper functions  ***************************************/  /* ZSTD_compressBound() - * Note that the result from this function is only compatible with the "normal" - * full-block strategy. - * When there are a lot of small blocks due to frequent flush in streaming mode - * the overhead of headers can make the compressed data to be larger than the - * return value of ZSTD_compressBound(). + * Note that the result from this function is only valid for + * the one-pass compression functions. + * When employing the streaming mode, + * if flushes are frequently altering the size of blocks, + * the overhead from block headers can make the compressed data larger + * than the return value of ZSTD_compressBound().   
*/  size_t ZSTD_compressBound(size_t srcSize) { -    return ZSTD_COMPRESSBOUND(srcSize); +    size_t const r = ZSTD_COMPRESSBOUND(srcSize); +    if (r==0) return ERROR(srcSize_wrong); +    return r;  } @@ -75,12 +81,12 @@ struct ZSTD_CDict_s {      ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */      U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */      ZSTD_cwksp workspace; -    ZSTD_matchState_t matchState; +    ZSTD_MatchState_t matchState;      ZSTD_compressedBlockState_t cBlockState;      ZSTD_customMem customMem;      U32 dictID;      int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ -    ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use +    ZSTD_ParamSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use                                             * row-based matchfinder. Unless the cdict is reloaded, we will use                                             * the same greedy/lazy matchfinder at compression time.                                             */ @@ -130,11 +136,12 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)      ZSTD_cwksp_move(&cctx->workspace, &ws);      cctx->staticSize = workspaceSize; -    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */ -    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; +    /* statically sized space. tmpWorkspace never moves (but prev/next block swap places) */ +    if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;      cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));      cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); -    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE); +    cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE); +    cctx->tmpWkspSize = TMP_WORKSPACE_SIZE;      cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());      return cctx;  } @@ -168,15 +175,13 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)  size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)  { +    DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx);      if (cctx==NULL) return 0;   /* support free on NULL */      RETURN_ERROR_IF(cctx->staticSize, memory_allocation,                      "not compatible with static CCtx"); -    { -        int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); +    {   int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);          ZSTD_freeCCtxContent(cctx); -        if (!cctxInWorkspace) { -            ZSTD_customFree(cctx, cctx->customMem); -        } +        if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem);      }      return 0;  } @@ -205,7 +210,7 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)  }  /* private API call, for dictBuilder only */ -const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } +const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }  /* Returns true if the strategy supports using a row based matchfinder */  static 
int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { @@ -215,32 +220,27 @@ static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {  /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder   * for this compression.   */ -static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) { +static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_ParamSwitch_e mode) {      assert(mode != ZSTD_ps_auto);      return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);  }  /* Returns row matchfinder usage given an initial mode and cParams */ -static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode, +static ZSTD_ParamSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_ParamSwitch_e mode,                                                           const ZSTD_compressionParameters* const cParams) { -#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON) -    int const kHasSIMD128 = 1; -#else -    int const kHasSIMD128 = 0; -#endif +    /* The Linux Kernel does not use SIMD, and 128KB is a very common size, e.g. in BtrFS. +     * The row match finder is slower for this size without SIMD, so disable it. +     */ +    const unsigned kWindowLogLowerBound = 17;      if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */      mode = ZSTD_ps_disable;      if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode; -    if (kHasSIMD128) { -        if (cParams->windowLog > 14) mode = ZSTD_ps_enable; -    } else { -        if (cParams->windowLog > 17) mode = ZSTD_ps_enable; -    } +    if (cParams->windowLog > kWindowLogLowerBound) mode = ZSTD_ps_enable;      return mode;  }  /* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ -static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, +static ZSTD_ParamSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_ParamSwitch_e mode,                                                          const ZSTD_compressionParameters* const cParams) {      if (mode != ZSTD_ps_auto) return mode;      return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable; @@ -248,7 +248,7 @@ static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,  /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */  static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, -                                   const ZSTD_paramSwitch_e useRowMatchFinder, +                                   const ZSTD_ParamSwitch_e useRowMatchFinder,                                     const U32 forDDSDict) {      assert(useRowMatchFinder != ZSTD_ps_auto);      /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate. @@ -257,16 +257,44 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,      return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));  } -/* Returns 1 if compression parameters are such that we should +/* Returns ZSTD_ps_enable if compression parameters are such that we should   * enable long distance matching (wlog >= 27, strategy >= btopt). - * Returns 0 otherwise. + * Returns ZSTD_ps_disable otherwise.   
*/ -static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, +static ZSTD_ParamSwitch_e ZSTD_resolveEnableLdm(ZSTD_ParamSwitch_e mode,                                   const ZSTD_compressionParameters* const cParams) {      if (mode != ZSTD_ps_auto) return mode;      return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;  } +static int ZSTD_resolveExternalSequenceValidation(int mode) { +    return mode; +} + +/* Resolves maxBlockSize to the default if no value is present. */ +static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) { +    if (maxBlockSize == 0) { +        return ZSTD_BLOCKSIZE_MAX; +    } else { +        return maxBlockSize; +    } +} + +static ZSTD_ParamSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_ParamSwitch_e value, int cLevel) { +    if (value != ZSTD_ps_auto) return value; +    if (cLevel < 10) { +        return ZSTD_ps_disable; +    } else { +        return ZSTD_ps_enable; +    } +} + +/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. + * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. */ +static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) { +    return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast; +} +  static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(          ZSTD_compressionParameters cParams)  { @@ -282,8 +310,12 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(          assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);          assert(cctxParams.ldmParams.hashRateLog < 32);      } -    cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); +    cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams);      cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); +    cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); +    cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); +    cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes, +                                                                             cctxParams.compressionLevel);      assert(!ZSTD_checkCParams(cParams));      return cctxParams;  } @@ -329,10 +361,13 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel)  #define ZSTD_NO_CLEVEL 0  /* - * Initializes the cctxParams from params and compressionLevel. + * Initializes `cctxParams` from `params` and `compressionLevel`.   * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.   
*/ -static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel) +static void +ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, +                        const ZSTD_parameters* params, +                              int compressionLevel)  {      assert(!ZSTD_checkCParams(params->cParams));      ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); @@ -343,10 +378,13 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par       */      cctxParams->compressionLevel = compressionLevel;      cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, ¶ms->cParams); -    cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, ¶ms->cParams); +    cctxParams->postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, ¶ms->cParams);      cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, ¶ms->cParams); +    cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences); +    cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); +    cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel);      DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d", -                cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm); +                cctxParams->useRowMatchFinder, cctxParams->postBlockSplitter, cctxParams->ldmParams.enableLdm);  }  size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) @@ -359,7 +397,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete  /*   * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. - * @param param Validated zstd parameters. + * @param params Validated zstd parameters.   
*/  static void ZSTD_CCtxParams_setZstdParams(          ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) @@ -455,8 +493,8 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)          return bounds;      case ZSTD_c_enableLongDistanceMatching: -        bounds.lowerBound = 0; -        bounds.upperBound = 1; +        bounds.lowerBound = (int)ZSTD_ps_auto; +        bounds.upperBound = (int)ZSTD_ps_disable;          return bounds;      case ZSTD_c_ldmHashLog: @@ -534,11 +572,16 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)          bounds.upperBound = 1;          return bounds; -    case ZSTD_c_useBlockSplitter: +    case ZSTD_c_splitAfterSequences:          bounds.lowerBound = (int)ZSTD_ps_auto;          bounds.upperBound = (int)ZSTD_ps_disable;          return bounds; +    case ZSTD_c_blockSplitterLevel: +        bounds.lowerBound = 0; +        bounds.upperBound = ZSTD_BLOCKSPLITTER_LEVEL_MAX; +        return bounds; +      case ZSTD_c_useRowMatchFinder:          bounds.lowerBound = (int)ZSTD_ps_auto;          bounds.upperBound = (int)ZSTD_ps_disable; @@ -549,6 +592,26 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)          bounds.upperBound = 1;          return bounds; +    case ZSTD_c_prefetchCDictTables: +        bounds.lowerBound = (int)ZSTD_ps_auto; +        bounds.upperBound = (int)ZSTD_ps_disable; +        return bounds; + +    case ZSTD_c_enableSeqProducerFallback: +        bounds.lowerBound = 0; +        bounds.upperBound = 1; +        return bounds; + +    case ZSTD_c_maxBlockSize: +        bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; +        bounds.upperBound = ZSTD_BLOCKSIZE_MAX; +        return bounds; + +    case ZSTD_c_repcodeResolution: +        bounds.lowerBound = (int)ZSTD_ps_auto; +        bounds.upperBound = (int)ZSTD_ps_disable; +        return bounds; +      default:          bounds.error = ERROR(parameter_unsupported);          return bounds; @@ -567,10 +630,11 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)      return 0;  } -#define BOUNDCHECK(cParam, val) { \ -    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ -                    parameter_outOfBound, "Param out of bounds"); \ -} +#define BOUNDCHECK(cParam, val)                                       \ +    do {                                                              \ +        RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val),        \ +                        parameter_outOfBound, "Param out of bounds"); \ +    } while (0)  static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) @@ -584,6 +648,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)      case ZSTD_c_minMatch:      case ZSTD_c_targetLength:      case ZSTD_c_strategy: +    case ZSTD_c_blockSplitterLevel:          return 1;      case ZSTD_c_format: @@ -610,9 +675,13 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)      case ZSTD_c_stableOutBuffer:      case ZSTD_c_blockDelimiters:      case ZSTD_c_validateSequences: -    case ZSTD_c_useBlockSplitter: +    case ZSTD_c_splitAfterSequences:      case ZSTD_c_useRowMatchFinder:      case ZSTD_c_deterministicRefPrefix: +    case ZSTD_c_prefetchCDictTables: +    case ZSTD_c_enableSeqProducerFallback: +    case ZSTD_c_maxBlockSize: +    case ZSTD_c_repcodeResolution:      default:          return 0;      } @@ -625,7 +694,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)          if (ZSTD_isUpdateAuthorized(param)) {              cctx->cParamsChanged = 1;        
  } else { -            RETURN_ERROR(stage_wrong, "can only set params in ctx init stage"); +            RETURN_ERROR(stage_wrong, "can only set params in cctx init stage");      }   }      switch(param) @@ -665,9 +734,14 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)      case ZSTD_c_stableOutBuffer:      case ZSTD_c_blockDelimiters:      case ZSTD_c_validateSequences: -    case ZSTD_c_useBlockSplitter: +    case ZSTD_c_splitAfterSequences: +    case ZSTD_c_blockSplitterLevel:      case ZSTD_c_useRowMatchFinder:      case ZSTD_c_deterministicRefPrefix: +    case ZSTD_c_prefetchCDictTables: +    case ZSTD_c_enableSeqProducerFallback: +    case ZSTD_c_maxBlockSize: +    case ZSTD_c_repcodeResolution:          break;      default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); @@ -723,12 +797,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,      case ZSTD_c_minMatch :          if (value!=0)   /* 0 => use default */              BOUNDCHECK(ZSTD_c_minMatch, value); -        CCtxParams->cParams.minMatch = value; +        CCtxParams->cParams.minMatch = (U32)value;          return CCtxParams->cParams.minMatch;      case ZSTD_c_targetLength :          BOUNDCHECK(ZSTD_c_targetLength, value); -        CCtxParams->cParams.targetLength = value; +        CCtxParams->cParams.targetLength = (U32)value;          return CCtxParams->cParams.targetLength;      case ZSTD_c_strategy : @@ -741,12 +815,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,          /* Content size written in frame header _when known_ (default:1) */          DEBUGLOG(4, "set content size flag = %u", (value!=0));          CCtxParams->fParams.contentSizeFlag = value != 0; -        return CCtxParams->fParams.contentSizeFlag; +        return (size_t)CCtxParams->fParams.contentSizeFlag;      case ZSTD_c_checksumFlag :          /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */          CCtxParams->fParams.checksumFlag = value != 0; -        return CCtxParams->fParams.checksumFlag; +        return (size_t)CCtxParams->fParams.checksumFlag;      case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */          DEBUGLOG(4, "set dictIDFlag = %u", (value!=0)); @@ -755,18 +829,18 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,      case ZSTD_c_forceMaxWindow :          CCtxParams->forceWindow = (value != 0); -        return CCtxParams->forceWindow; +        return (size_t)CCtxParams->forceWindow;      case ZSTD_c_forceAttachDict : {          const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; -        BOUNDCHECK(ZSTD_c_forceAttachDict, pref); +        BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref);          CCtxParams->attachDictPref = pref;          return CCtxParams->attachDictPref;      }      case ZSTD_c_literalCompressionMode : { -        const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; -        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); +        const ZSTD_ParamSwitch_e lcm = (ZSTD_ParamSwitch_e)value; +        BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm);          CCtxParams->literalCompressionMode = lcm;          return CCtxParams->literalCompressionMode;      } @@ -789,47 +863,50 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,      case ZSTD_c_enableDedicatedDictSearch :          CCtxParams->enableDedicatedDictSearch = (value!=0); -        return 
CCtxParams->enableDedicatedDictSearch; +        return (size_t)CCtxParams->enableDedicatedDictSearch;      case ZSTD_c_enableLongDistanceMatching : -        CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; +        BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value); +        CCtxParams->ldmParams.enableLdm = (ZSTD_ParamSwitch_e)value;          return CCtxParams->ldmParams.enableLdm;      case ZSTD_c_ldmHashLog :          if (value!=0)   /* 0 ==> auto */              BOUNDCHECK(ZSTD_c_ldmHashLog, value); -        CCtxParams->ldmParams.hashLog = value; +        CCtxParams->ldmParams.hashLog = (U32)value;          return CCtxParams->ldmParams.hashLog;      case ZSTD_c_ldmMinMatch :          if (value!=0)   /* 0 ==> default */              BOUNDCHECK(ZSTD_c_ldmMinMatch, value); -        CCtxParams->ldmParams.minMatchLength = value; +        CCtxParams->ldmParams.minMatchLength = (U32)value;          return CCtxParams->ldmParams.minMatchLength;      case ZSTD_c_ldmBucketSizeLog :          if (value!=0)   /* 0 ==> default */              BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value); -        CCtxParams->ldmParams.bucketSizeLog = value; +        CCtxParams->ldmParams.bucketSizeLog = (U32)value;          return CCtxParams->ldmParams.bucketSizeLog;      case ZSTD_c_ldmHashRateLog :          if (value!=0)   /* 0 ==> default */              BOUNDCHECK(ZSTD_c_ldmHashRateLog, value); -        CCtxParams->ldmParams.hashRateLog = value; +        CCtxParams->ldmParams.hashRateLog = (U32)value;          return CCtxParams->ldmParams.hashRateLog;      case ZSTD_c_targetCBlockSize : -        if (value!=0)   /* 0 ==> default */ +        if (value!=0) {  /* 0 ==> default */ +            value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN);              BOUNDCHECK(ZSTD_c_targetCBlockSize, value); -        CCtxParams->targetCBlockSize = value; +        } +        CCtxParams->targetCBlockSize = (U32)value;          return CCtxParams->targetCBlockSize;      case ZSTD_c_srcSizeHint :          if (value!=0)    /* 0 ==> default */              BOUNDCHECK(ZSTD_c_srcSizeHint, value);          CCtxParams->srcSizeHint = value; -        return CCtxParams->srcSizeHint; +        return (size_t)CCtxParams->srcSizeHint;      case ZSTD_c_stableInBuffer:          BOUNDCHECK(ZSTD_c_stableInBuffer, value); @@ -843,28 +920,55 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,      case ZSTD_c_blockDelimiters:          BOUNDCHECK(ZSTD_c_blockDelimiters, value); -        CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; +        CCtxParams->blockDelimiters = (ZSTD_SequenceFormat_e)value;          return CCtxParams->blockDelimiters;      case ZSTD_c_validateSequences:          BOUNDCHECK(ZSTD_c_validateSequences, value);          CCtxParams->validateSequences = value; -        return CCtxParams->validateSequences; +        return (size_t)CCtxParams->validateSequences; + +    case ZSTD_c_splitAfterSequences: +        BOUNDCHECK(ZSTD_c_splitAfterSequences, value); +        CCtxParams->postBlockSplitter = (ZSTD_ParamSwitch_e)value; +        return CCtxParams->postBlockSplitter; -    case ZSTD_c_useBlockSplitter: -        BOUNDCHECK(ZSTD_c_useBlockSplitter, value); -        CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value; -        return CCtxParams->useBlockSplitter; +    case ZSTD_c_blockSplitterLevel: +        BOUNDCHECK(ZSTD_c_blockSplitterLevel, value); +        CCtxParams->preBlockSplitter_level = value; +        return (size_t)CCtxParams->preBlockSplitter_level;      case 
ZSTD_c_useRowMatchFinder:          BOUNDCHECK(ZSTD_c_useRowMatchFinder, value); -        CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; +        CCtxParams->useRowMatchFinder = (ZSTD_ParamSwitch_e)value;          return CCtxParams->useRowMatchFinder;      case ZSTD_c_deterministicRefPrefix:          BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);          CCtxParams->deterministicRefPrefix = !!value; -        return CCtxParams->deterministicRefPrefix; +        return (size_t)CCtxParams->deterministicRefPrefix; + +    case ZSTD_c_prefetchCDictTables: +        BOUNDCHECK(ZSTD_c_prefetchCDictTables, value); +        CCtxParams->prefetchCDictTables = (ZSTD_ParamSwitch_e)value; +        return CCtxParams->prefetchCDictTables; + +    case ZSTD_c_enableSeqProducerFallback: +        BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value); +        CCtxParams->enableMatchFinderFallback = value; +        return (size_t)CCtxParams->enableMatchFinderFallback; + +    case ZSTD_c_maxBlockSize: +        if (value!=0)    /* 0 ==> default */ +            BOUNDCHECK(ZSTD_c_maxBlockSize, value); +        assert(value>=0); +        CCtxParams->maxBlockSize = (size_t)value; +        return CCtxParams->maxBlockSize; + +    case ZSTD_c_repcodeResolution: +        BOUNDCHECK(ZSTD_c_repcodeResolution, value); +        CCtxParams->searchForExternalRepcodes = (ZSTD_ParamSwitch_e)value; +        return CCtxParams->searchForExternalRepcodes;      default: RETURN_ERROR(parameter_unsupported, "unknown parameter");      } @@ -881,7 +985,7 @@ size_t ZSTD_CCtxParams_getParameter(      switch(param)      {      case ZSTD_c_format : -        *value = CCtxParams->format; +        *value = (int)CCtxParams->format;          break;      case ZSTD_c_compressionLevel :          *value = CCtxParams->compressionLevel; @@ -896,16 +1000,16 @@ size_t ZSTD_CCtxParams_getParameter(          *value = (int)CCtxParams->cParams.chainLog;          break;      case ZSTD_c_searchLog : -        *value = CCtxParams->cParams.searchLog; +        *value = (int)CCtxParams->cParams.searchLog;          break;      case ZSTD_c_minMatch : -        *value = CCtxParams->cParams.minMatch; +        *value = (int)CCtxParams->cParams.minMatch;          break;      case ZSTD_c_targetLength : -        *value = CCtxParams->cParams.targetLength; +        *value = (int)CCtxParams->cParams.targetLength;          break;      case ZSTD_c_strategy : -        *value = (unsigned)CCtxParams->cParams.strategy; +        *value = (int)CCtxParams->cParams.strategy;          break;      case ZSTD_c_contentSizeFlag :          *value = CCtxParams->fParams.contentSizeFlag; @@ -920,10 +1024,10 @@ size_t ZSTD_CCtxParams_getParameter(          *value = CCtxParams->forceWindow;          break;      case ZSTD_c_forceAttachDict : -        *value = CCtxParams->attachDictPref; +        *value = (int)CCtxParams->attachDictPref;          break;      case ZSTD_c_literalCompressionMode : -        *value = CCtxParams->literalCompressionMode; +        *value = (int)CCtxParams->literalCompressionMode;          break;      case ZSTD_c_nbWorkers :          assert(CCtxParams->nbWorkers == 0); @@ -939,19 +1043,19 @@ size_t ZSTD_CCtxParams_getParameter(          *value = CCtxParams->enableDedicatedDictSearch;          break;      case ZSTD_c_enableLongDistanceMatching : -        *value = CCtxParams->ldmParams.enableLdm; +        *value = (int)CCtxParams->ldmParams.enableLdm;          break;      case ZSTD_c_ldmHashLog : -        *value = CCtxParams->ldmParams.hashLog; +        *value = 
(int)CCtxParams->ldmParams.hashLog;          break;      case ZSTD_c_ldmMinMatch : -        *value = CCtxParams->ldmParams.minMatchLength; +        *value = (int)CCtxParams->ldmParams.minMatchLength;          break;      case ZSTD_c_ldmBucketSizeLog : -        *value = CCtxParams->ldmParams.bucketSizeLog; +        *value = (int)CCtxParams->ldmParams.bucketSizeLog;          break;      case ZSTD_c_ldmHashRateLog : -        *value = CCtxParams->ldmParams.hashRateLog; +        *value = (int)CCtxParams->ldmParams.hashRateLog;          break;      case ZSTD_c_targetCBlockSize :          *value = (int)CCtxParams->targetCBlockSize; @@ -971,8 +1075,11 @@ size_t ZSTD_CCtxParams_getParameter(      case ZSTD_c_validateSequences :          *value = (int)CCtxParams->validateSequences;          break; -    case ZSTD_c_useBlockSplitter : -        *value = (int)CCtxParams->useBlockSplitter; +    case ZSTD_c_splitAfterSequences : +        *value = (int)CCtxParams->postBlockSplitter; +        break; +    case ZSTD_c_blockSplitterLevel : +        *value = CCtxParams->preBlockSplitter_level;          break;      case ZSTD_c_useRowMatchFinder :          *value = (int)CCtxParams->useRowMatchFinder; @@ -980,6 +1087,18 @@ size_t ZSTD_CCtxParams_getParameter(      case ZSTD_c_deterministicRefPrefix:          *value = (int)CCtxParams->deterministicRefPrefix;          break; +    case ZSTD_c_prefetchCDictTables: +        *value = (int)CCtxParams->prefetchCDictTables; +        break; +    case ZSTD_c_enableSeqProducerFallback: +        *value = CCtxParams->enableMatchFinderFallback; +        break; +    case ZSTD_c_maxBlockSize: +        *value = (int)CCtxParams->maxBlockSize; +        break; +    case ZSTD_c_repcodeResolution: +        *value = (int)CCtxParams->searchForExternalRepcodes; +        break;      default: RETURN_ERROR(parameter_unsupported, "unknown parameter");      }      return 0; @@ -1006,9 +1125,47 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(      return 0;  } +size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams) +{ +    ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */); +    DEBUGLOG(4, "ZSTD_CCtx_setCParams"); +    /* only update if all parameters are valid */ +    FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cparams.windowLog), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, (int)cparams.chainLog), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, (int)cparams.hashLog), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, (int)cparams.searchLog), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, (int)cparams.minMatch), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, (int)cparams.targetLength), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, (int)cparams.strategy), ""); +    return 0; +} + +size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams) +{ +    ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */); +    DEBUGLOG(4, "ZSTD_CCtx_setFParams"); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), ""); +    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), ""); +    return 
0; +} + +size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params) +{ +    DEBUGLOG(4, "ZSTD_CCtx_setParams"); +    /* First check cParams, because we want to update all or none. */ +    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); +    /* Next set fParams, because this could fail if the cctx isn't in init stage. */ +    FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), ""); +    /* Finally set cParams, which should succeed. */ +    FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), ""); +    return 0; +} +  size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)  { -    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); +    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize);      RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,                      "Can't set pledgedSrcSize when not in init stage.");      cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; @@ -1024,9 +1181,9 @@ static void ZSTD_dedicatedDictSearch_revertCParams(          ZSTD_compressionParameters* cParams);  /* - * Initializes the local dict using the requested parameters. - * NOTE: This does not use the pledged src size, because it may be used for more - * than one compression. + * Initializes the local dictionary using requested parameters. + * NOTE: Initialization does not employ the pledged src size, + * because the dictionary may be used for multiple compressions.   */  static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)  { @@ -1039,8 +1196,8 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)          return 0;      }      if (dl->cdict != NULL) { -        assert(cctx->cdict == dl->cdict);          /* Local dictionary already initialized. */ +        assert(cctx->cdict == dl->cdict);          return 0;      }      assert(dl->dictSize > 0); @@ -1060,26 +1217,30 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)  }  size_t ZSTD_CCtx_loadDictionary_advanced( -        ZSTD_CCtx* cctx, const void* dict, size_t dictSize, -        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) +        ZSTD_CCtx* cctx, +        const void* dict, size_t dictSize, +        ZSTD_dictLoadMethod_e dictLoadMethod, +        ZSTD_dictContentType_e dictContentType)  { -    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, -                    "Can't load a dictionary when ctx is not in init stage.");      DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); -    ZSTD_clearAllDicts(cctx);  /* in case one already exists */ -    if (dict == NULL || dictSize == 0)  /* no dictionary mode */ +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, +                    "Can't load a dictionary when cctx is not in init stage."); +    ZSTD_clearAllDicts(cctx);  /* erase any previously set dictionary */ +    if (dict == NULL || dictSize == 0)  /* no dictionary */          return 0;      if (dictLoadMethod == ZSTD_dlm_byRef) {          cctx->localDict.dict = dict;      } else { +        /* copy dictionary content inside CCtx to own its lifetime */          void* dictBuffer;          RETURN_ERROR_IF(cctx->staticSize, memory_allocation, -                        "no malloc for static CCtx"); +                        "static CCtx can't allocate for an internal copy of dictionary");          dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); -        RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!"); +        
RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation, +                        "allocation failed for dictionary content");          ZSTD_memcpy(dictBuffer, dict, dictSize); -        cctx->localDict.dictBuffer = dictBuffer; -        cctx->localDict.dict = dictBuffer; +        cctx->localDict.dictBuffer = dictBuffer;  /* owned ptr to free */ +        cctx->localDict.dict = dictBuffer;        /* read-only reference */      }      cctx->localDict.dictSize = dictSize;      cctx->localDict.dictContentType = dictContentType; @@ -1149,7 +1310,7 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)      if ( (reset == ZSTD_reset_parameters)        || (reset == ZSTD_reset_session_and_parameters) ) {          RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, -                        "Can't reset parameters only when not in init stage."); +                        "Reset parameters is only possible during init stage.");          ZSTD_clearAllDicts(cctx);          return ZSTD_CCtxParams_reset(&cctx->requestedParams);      } @@ -1168,7 +1329,7 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)      BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);      BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);      BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength); -    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy); +    BOUNDCHECK(ZSTD_c_strategy,  (int)cParams.strategy);      return 0;  } @@ -1178,11 +1339,12 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)  static ZSTD_compressionParameters  ZSTD_clampCParams(ZSTD_compressionParameters cParams)  { -#   define CLAMP_TYPE(cParam, val, type) {                                \ -        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \ -        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \ -        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \ -    } +#   define CLAMP_TYPE(cParam, val, type)                                      \ +        do {                                                                  \ +            ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \ +            if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \ +            else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \ +        } while (0)  #   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)      CLAMP(ZSTD_c_windowLog, cParams.windowLog);      CLAMP(ZSTD_c_chainLog,  cParams.chainLog); @@ -1240,19 +1402,62 @@ static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)   *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).   *  mostly downsize to reduce memory consumption and initialization latency.   * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. - * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`. + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`.   *  note : `srcSize==0` means 0!   *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). 
*/  static ZSTD_compressionParameters  ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,                              unsigned long long srcSize,                              size_t dictSize, -                            ZSTD_cParamMode_e mode) +                            ZSTD_CParamMode_e mode, +                            ZSTD_ParamSwitch_e useRowMatchFinder)  {      const U64 minSrcSize = 513; /* (1<<9) + 1 */      const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);      assert(ZSTD_checkCParams(cPar)==0); +    /* Cascade the selected strategy down to the next-highest one built into +     * this binary. */ +#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +    if (cPar.strategy == ZSTD_btultra2) { +        cPar.strategy = ZSTD_btultra; +    } +    if (cPar.strategy == ZSTD_btultra) { +        cPar.strategy = ZSTD_btopt; +    } +#endif +#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR +    if (cPar.strategy == ZSTD_btopt) { +        cPar.strategy = ZSTD_btlazy2; +    } +#endif +#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +    if (cPar.strategy == ZSTD_btlazy2) { +        cPar.strategy = ZSTD_lazy2; +    } +#endif +#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +    if (cPar.strategy == ZSTD_lazy2) { +        cPar.strategy = ZSTD_lazy; +    } +#endif +#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR +    if (cPar.strategy == ZSTD_lazy) { +        cPar.strategy = ZSTD_greedy; +    } +#endif +#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR +    if (cPar.strategy == ZSTD_greedy) { +        cPar.strategy = ZSTD_dfast; +    } +#endif +#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR +    if (cPar.strategy == ZSTD_dfast) { +        cPar.strategy = ZSTD_fast; +        cPar.targetLength = 0; +    } +#endif +      switch (mode) {      case ZSTD_cpm_unknown:      case ZSTD_cpm_noAttachDict: @@ -1281,8 +1486,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,      }      /* resize windowLog if input is small enough, to use less memory */ -    if ( (srcSize < maxWindowResize) -      && (dictSize < maxWindowResize) )  { +    if ( (srcSize <= maxWindowResize) +      && (dictSize <= maxWindowResize) )  {          U32 const tSize = (U32)(srcSize + dictSize);          static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;          U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN : @@ -1300,6 +1505,42 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,      if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)          cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */ +    /* We can't use more than 32 bits of hash in total, so that means that we require: +     * (hashLog + 8) <= 32 && (chainLog + 8) <= 32 +     */ +    if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) { +        U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS; +        if (cPar.hashLog > maxShortCacheHashLog) { +            cPar.hashLog = maxShortCacheHashLog; +        } +        if (cPar.chainLog > maxShortCacheHashLog) { +            cPar.chainLog = maxShortCacheHashLog; +        } +    } + + +    /* At this point, we aren't 100% sure if we are using the row match finder. +     * Unless it is explicitly disabled, conservatively assume that it is enabled. +     * In this case it will only be disabled for small sources, so shrinking the +     * hash log a little bit shouldn't result in any ratio loss. 
+     */ +    if (useRowMatchFinder == ZSTD_ps_auto) +        useRowMatchFinder = ZSTD_ps_enable; + +    /* We can't hash more than 32-bits in total. So that means that we require: +     * (hashLog - rowLog + 8) <= 32 +     */ +    if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) { +        /* Switch to 32-entry rows if searchLog is 5 (or more) */ +        U32 const rowLog = BOUNDED(4, cPar.searchLog, 6); +        U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS; +        U32 const maxHashLog = maxRowHashLog + rowLog; +        assert(cPar.hashLog >= rowLog); +        if (cPar.hashLog > maxHashLog) { +            cPar.hashLog = maxHashLog; +        } +    } +      return cPar;  } @@ -1310,11 +1551,11 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar,  {      cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */      if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; -    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown); +    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto);  } -static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); -static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);  static void ZSTD_overrideCParams(                ZSTD_compressionParameters* cParams, @@ -1330,24 +1571,25 @@ static void ZSTD_overrideCParams(  }  ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( -        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)  {      ZSTD_compressionParameters cParams;      if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) { -      srcSizeHint = CCtxParams->srcSizeHint; +        assert(CCtxParams->srcSizeHint>=0); +        srcSizeHint = (U64)CCtxParams->srcSizeHint;      }      cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);      if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;      ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);      assert(!ZSTD_checkCParams(cParams));      /* srcSizeHint == 0 means 0 */ -    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode); +    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder);  }  static size_t  ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, -                       const ZSTD_paramSwitch_e useRowMatchFinder, -                       const U32 enableDedicatedDictSearch, +                       const ZSTD_ParamSwitch_e useRowMatchFinder, +                       const int enableDedicatedDictSearch,                         const U32 forCCtx)  {      /* chain table size should be 0 for fast or row-hash strategies */ @@ -1363,14 +1605,14 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,                          
    + hSize * sizeof(U32)                              + h3Size * sizeof(U32);      size_t const optPotentialSpace = -        ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32)) -      + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32)) -      + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32)) -      + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32)) -      + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)) -      + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); +        ZSTD_cwksp_aligned64_alloc_size((MaxML+1) * sizeof(U32)) +      + ZSTD_cwksp_aligned64_alloc_size((MaxLL+1) * sizeof(U32)) +      + ZSTD_cwksp_aligned64_alloc_size((MaxOff+1) * sizeof(U32)) +      + ZSTD_cwksp_aligned64_alloc_size((1<<Litbits) * sizeof(U32)) +      + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t)) +      + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));      size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) -                                            ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16)) +                                            ? ZSTD_cwksp_aligned64_alloc_size(hSize)                                              : 0;      size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))                                  ? optPotentialSpace @@ -1386,30 +1628,38 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,      return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;  } +/* Helper function for calculating memory requirements. + * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */ +static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) { +    U32 const divider = (minMatch==3 || useSequenceProducer) ? 3 : 4; +    return blockSize / divider; +} +  static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(          const ZSTD_compressionParameters* cParams,          const ldmParams_t* ldmParams,          const int isStatic, -        const ZSTD_paramSwitch_e useRowMatchFinder, +        const ZSTD_ParamSwitch_e useRowMatchFinder,          const size_t buffInSize,          const size_t buffOutSize, -        const U64 pledgedSrcSize) +        const U64 pledgedSrcSize, +        int useSequenceProducer, +        size_t maxBlockSize)  {      size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize); -    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); -    U32    const divider = (cParams->minMatch==3) ? 
3 : 4; -    size_t const maxNbSeq = blockSize / divider; +    size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize); +    size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer);      size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) -                            + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef)) +                            + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * sizeof(SeqDef))                              + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); -    size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE); +    size_t const tmpWorkSpace = ZSTD_cwksp_alloc_size(TMP_WORKSPACE_SIZE);      size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));      size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);      size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);      size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);      size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ? -        ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; +        ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;      size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) @@ -1417,15 +1667,21 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(      size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; +    size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); +    size_t const externalSeqSpace = useSequenceProducer +        ? ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence)) +        : 0; +      size_t const neededSpace =          cctxSpace + -        entropySpace + +        tmpWorkSpace +          blockStateSpace +          ldmSpace +          ldmSeqSpace +          matchStateSize +          tokenSpace + -        bufferSpace; +        bufferSpace + +        externalSeqSpace;      DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);      return neededSpace; @@ -1435,7 +1691,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)  {      ZSTD_compressionParameters const cParams =                  ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); -    ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, +    ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,                                                                                 &cParams);      RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); @@ -1443,7 +1699,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)       * be needed. However, we still allocate two 0-sized buffers, which can       * take space under ASAN. 
*/      return ZSTD_estimateCCtxSize_usingCCtxParams_internal( -        &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN); +        &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);  }  size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) @@ -1493,18 +1749,18 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)      RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");      {   ZSTD_compressionParameters const cParams =                  ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); -        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); +        size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog);          size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)                  ? ((size_t)1 << cParams.windowLog) + blockSize                  : 0;          size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)                  ? ZSTD_compressBound(blockSize) + 1                  : 0; -        ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams); +        ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams);          return ZSTD_estimateCCtxSize_usingCCtxParams_internal(              &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, -            ZSTD_CONTENTSIZE_UNKNOWN); +            ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);      }  } @@ -1600,7 +1856,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)   *  Invalidate all the matches in the match finder tables.   *  Requires nextSrc and base to be set (can be NULL).   
*/ -static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) +static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms)  {      ZSTD_window_clear(&ms->window); @@ -1637,12 +1893,25 @@ typedef enum {      ZSTD_resetTarget_CCtx  } ZSTD_resetTarget_e; +/* Mixes bits in a 64 bits in a value, based on XXH3_rrmxmx */ +static U64 ZSTD_bitmix(U64 val, U64 len) { +    val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24); +    val *= 0x9FB21C651E98DF25ULL; +    val ^= (val >> 35) + len ; +    val *= 0x9FB21C651E98DF25ULL; +    return val ^ (val >> 28); +} + +/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */ +static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) { +    ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4); +}  static size_t -ZSTD_reset_matchState(ZSTD_matchState_t* ms, +ZSTD_reset_matchState(ZSTD_MatchState_t* ms,                        ZSTD_cwksp* ws,                  const ZSTD_compressionParameters* cParams, -                const ZSTD_paramSwitch_e useRowMatchFinder, +                const ZSTD_ParamSwitch_e useRowMatchFinder,                  const ZSTD_compResetPolicy_e crp,                  const ZSTD_indexResetPolicy_e forceResetIndex,                  const ZSTD_resetTarget_e forWho) @@ -1664,6 +1933,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,      }      ms->hashLog3 = hashLog3; +    ms->lazySkipping = 0;      ZSTD_invalidateMatchState(ms); @@ -1685,22 +1955,19 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,          ZSTD_cwksp_clean_tables(ws);      } -    /* opt parser space */ -    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { -        DEBUGLOG(4, "reserving optimal parser space"); -        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned)); -        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned)); -        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned)); -        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned)); -        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)); -        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); -    } -      if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) { -        {   /* Row match finder needs an additional table of hashes ("tags") */ -            size_t const tagTableSize = hSize*sizeof(U16); -            ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize); -            if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize); +        /* Row match finder needs an additional table of hashes ("tags") */ +        size_t const tagTableSize = hSize; +        /* We want to generate a new salt in case we reset a Cctx, but we always want to use +         * 0 when we reset a Cdict */ +        if(forWho == ZSTD_resetTarget_CCtx) { +            ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize); +            ZSTD_advanceHashSalt(ms); +        } else { +            /* When we are not salting we want to always memset the memory */ +            ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned64(ws, tagTableSize); +            ZSTD_memset(ms->tagTable, 0, tagTableSize); +            ms->hashSalt = 0;          }          {   /* Switch to 32-entry rows if searchLog 
is 5 (or more) */              U32 const rowLog = BOUNDED(4, cParams->searchLog, 6); @@ -1709,6 +1976,17 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,          }      } +    /* opt parser space */ +    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { +        DEBUGLOG(4, "reserving optimal parser space"); +        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (1<<Litbits) * sizeof(unsigned)); +        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxLL+1) * sizeof(unsigned)); +        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxML+1) * sizeof(unsigned)); +        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxOff+1) * sizeof(unsigned)); +        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t)); +        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t)); +    } +      ms->cParams = *cParams;      RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, @@ -1754,7 +2032,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,  {      ZSTD_cwksp* const ws = &zc->workspace;      DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d", -                (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter); +                (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->postBlockSplitter);      assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));      zc->isFirstBlock = 1; @@ -1766,8 +2044,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,      params = &zc->appliedParams;      assert(params->useRowMatchFinder != ZSTD_ps_auto); -    assert(params->useBlockSplitter != ZSTD_ps_auto); +    assert(params->postBlockSplitter != ZSTD_ps_auto);      assert(params->ldmParams.enableLdm != ZSTD_ps_auto); +    assert(params->maxBlockSize != 0);      if (params->ldmParams.enableLdm == ZSTD_ps_enable) {          /* Adjust long distance matching parameters */          ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams); @@ -1776,9 +2055,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,      {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize)); -        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); -        U32    const divider = (params->cParams.minMatch==3) ? 3 : 4; -        size_t const maxNbSeq = blockSize / divider; +        size_t const blockSize = MIN(params->maxBlockSize, windowSize); +        size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params));
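The removed lines show what the new ZSTD_maxNbSeq() helper replaces: every stored sequence consumes at least a minimum match length, which caps how many sequences one block can hold. A hypothetical reconstruction of the helper, inferred only from the deleted divider logic plus the new external-producer argument (the real definition lives elsewhere in this patch):

/* Sketch of ZSTD_maxNbSeq(), inferred from the removed code above.
 * External sequence producers may emit 3-byte matches regardless of minMatch,
 * so they are assumed here to share the smaller divider. */
static size_t maxNbSeq_sketch(size_t blockSize, unsigned minMatch, int hasExternalSeqProducer)
{
    unsigned const divider = (minMatch == 3 || hasExternalSeqProducer) ? 3 : 4;
    return blockSize / divider; /* e.g. a 128 KiB block yields at most 43690 or 32768 sequences */
}

        size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)                  ? 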
ZSTD_compressBound(blockSize) + 1                  : 0; @@ -1795,8 +2073,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,          size_t const neededSpace =              ZSTD_estimateCCtxSize_usingCCtxParams_internal(                  &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder, -                buffInSize, buffOutSize, pledgedSrcSize); -        int resizeWorkspace; +                buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize);          FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!"); @@ -1805,7 +2082,7 @@          {   /* Check if workspace is large enough, alloc a new one if needed */              int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;              int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); -            resizeWorkspace = workspaceTooSmall || workspaceWasteful; +            int resizeWorkspace = workspaceTooSmall || workspaceWasteful;
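+            /* Illustration (values invented): with neededSpace == 10 MB, an 8 MB workspace is
+             * "too small", while a 40 MB workspace becomes "wasteful" once it has stayed
+             * oversized, by roughly a factor of ZSTD_WORKSPACETOOLARGE_FACTOR, for more than
+             * ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive resets (see the helpers in
+             * zstd_cwksp.h). Either condition triggers a reallocation below. */
             DEBUGLOG(4, "Need %zu B workspace", neededSpace);              DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); @@ -1823,21 +2100,23 @@                  DEBUGLOG(5, "reserving object space");                  /* Statically sized space. -                 * entropyWorkspace never moves, +                 * tmpWorkspace never moves,                   * though prev/next block swap places */                  assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));                  zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));                  RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");                  zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));                  RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); -                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE); -                RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); +                zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, TMP_WORKSPACE_SIZE); +                RETURN_ERROR_IF(zc->tmpWorkspace == NULL, memory_allocation, "couldn't allocate tmpWorkspace"); +                zc->tmpWkspSize = TMP_WORKSPACE_SIZE;          }   }          ZSTD_cwksp_clear(ws);          /* init params */          zc->blockState.matchState.cParams = params->cParams; +        zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable;          zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;          zc->consumedSrcSize = 0;          zc->producedCSize = 0; @@ -1845,7 +2124,7 @@              zc->appliedParams.fParams.contentSizeFlag = 0;          DEBUGLOG(4, "pledged content size : %u ; flag : %u",             (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); -        zc->blockSize = blockSize; +        zc->blockSizeMax = blockSize;          xxh64_reset(&zc->xxhState, 0);          zc->stage = ZSTDcs_init; @@ -1854,13 +2133,46 @@          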
ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); +        FORWARD_IF_ERROR(ZSTD_reset_matchState( +                &zc->blockState.matchState, +                ws, +                &params->cParams, +                params->useRowMatchFinder, +                crp, +                needsIndexReset, +                ZSTD_resetTarget_CCtx), ""); + +        zc->seqStore.sequencesStart = (SeqDef*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * sizeof(SeqDef)); + +        /* ldm hash table */ +        if (params->ldmParams.enableLdm == ZSTD_ps_enable) { +            /* TODO: avoid memset? */ +            size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; +            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * sizeof(ldmEntry_t)); +            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); +            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * sizeof(rawSeq)); +            zc->maxNbLdmSequences = maxNbLdmSeq; + +            ZSTD_window_init(&zc->ldmState.window); +            zc->ldmState.loadedDictEnd = 0; +        } + +        /* reserve space for block-level external sequences */ +        if (ZSTD_hasExtSeqProd(params)) { +            size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); +            zc->extSeqBufCapacity = maxNbExternalSeq; +            zc->extSeqBuf = +                (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence)); +        } + +        /* buffers */ +          /* ZSTD_wildcopy() is used to copy into the literals buffer,           * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.           */          zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);          zc->seqStore.maxNbLit = blockSize; -        /* buffers */          zc->bufferedPolicy = zbuff;          zc->inBuffSize = buffInSize;          zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); @@ -1883,32 +2195,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,          zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));          zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));          zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); -        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); - -        FORWARD_IF_ERROR(ZSTD_reset_matchState( -            &zc->blockState.matchState, -            ws, -            &params->cParams, -            params->useRowMatchFinder, -            crp, -            needsIndexReset, -            ZSTD_resetTarget_CCtx), ""); - -        /* ldm hash table */ -        if (params->ldmParams.enableLdm == ZSTD_ps_enable) { -            /* TODO: avoid memset? 
*/ -            size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; -            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); -            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); -            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); -            zc->maxNbLdmSequences = maxNbLdmSeq; - -            ZSTD_window_init(&zc->ldmState.window); -            zc->ldmState.loadedDictEnd = 0; -        }          DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); -        assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace)); +        assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace));          zc->initialized = 1; @@ -1980,7 +2269,8 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,          }          params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, -                                                     cdict->dictContentSize, ZSTD_cpm_attachDict); +                                                     cdict->dictContentSize, ZSTD_cpm_attachDict, +                                                     params.useRowMatchFinder);          params.cParams.windowLog = windowLog;          params.useRowMatchFinder = cdict->useRowMatchFinder;    /* cdict overrides */          FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize, @@ -2019,6 +2309,22 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,      return 0;  } +static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize, +                                        ZSTD_compressionParameters const* cParams) { +    if (ZSTD_CDictIndicesAreTagged(cParams)){ +        /* Remove tags from the CDict table if they are present. +         * See docs on "short cache" in zstd_compress_internal.h for context. 
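+         * Worked example (assuming ZSTD_SHORT_CACHE_TAG_BITS == 8): a tagged entry
+         * 0x12345678 keeps the index 0x123456 in its upper 24 bits and the tag 0x78 in
+         * its low 8 bits, so the loop below recovers the plain index as
+         * 0x12345678 >> 8 == 0x123456.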
*/ +        size_t i; +        for (i = 0; i < tableSize; i++) { +            U32 const taggedIndex = src[i]; +            U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS; +            dst[i] = index; +        } +    } else { +        ZSTD_memcpy(dst, src, tableSize * sizeof(U32)); +    } +} +  static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,                              const ZSTD_CDict* cdict,                              ZSTD_CCtx_params params, @@ -2054,26 +2360,29 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,                                                              : 0;          size_t const hSize =  (size_t)1 << cdict_cParams->hashLog; -        ZSTD_memcpy(cctx->blockState.matchState.hashTable, -               cdict->matchState.hashTable, -               hSize * sizeof(U32)); +        ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable, +                                cdict->matchState.hashTable, +                                hSize, cdict_cParams); +          /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */          if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) { -            ZSTD_memcpy(cctx->blockState.matchState.chainTable, -               cdict->matchState.chainTable, -               chainSize * sizeof(U32)); +            ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable, +                                    cdict->matchState.chainTable, +                                    chainSize, cdict_cParams);          }          /* copy tag table */          if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) { -            size_t const tagTableSize = hSize*sizeof(U16); +            size_t const tagTableSize = hSize;              ZSTD_memcpy(cctx->blockState.matchState.tagTable, -                cdict->matchState.tagTable, -                tagTableSize); +                        cdict->matchState.tagTable, +                        tagTableSize); +            cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt;          }      }      /* Zero the hashTable3, since the cdict never fills it */ -    {   int const h3log = cctx->blockState.matchState.hashLog3; +    assert(cctx->blockState.matchState.hashLog3 <= 31); +    {   U32 const h3log = cctx->blockState.matchState.hashLog3;          size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;          assert(cdict->matchState.hashLog3 == 0);          ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); @@ -2082,8 +2391,8 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,      ZSTD_cwksp_mark_tables_clean(&cctx->workspace);      /* copy dictionary offsets */ -    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState; -        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; +    {   ZSTD_MatchState_t const* srcMatchState = &cdict->matchState; +        ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState;          dstMatchState->window       = srcMatchState->window;          dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;          dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; @@ -2141,12 +2450,13 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,          /* Copy only compression parameters related to tables. 
*/          params.cParams = srcCCtx->appliedParams.cParams;          assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto); -        assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto); +        assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_ps_auto);          assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);          params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; -        params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; +        params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter;          params.ldmParams = srcCCtx->appliedParams.ldmParams;          params.fParams = fParams; +        params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize;          ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,                                  /* loadedDictSize */ 0,                                  ZSTDcrp_leaveDirty, zbuff); @@ -2166,7 +2476,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,                                      ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)                                      : 0;          size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; -        int const h3log = srcCCtx->blockState.matchState.hashLog3; +        U32 const h3log = srcCCtx->blockState.matchState.hashLog3;          size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;          ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable, @@ -2184,8 +2494,8 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,      /* copy dictionary offsets */      { -        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState; -        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; +        const ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState; +        ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState;          dstMatchState->window       = srcMatchState->window;          dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;          dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; @@ -2234,7 +2544,7 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa      /* Protect special index values < ZSTD_WINDOW_START_INDEX. */      U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;      assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */ -    assert(size < (1U<<31));   /* can be casted to int */ +    assert(size < (1U<<31));   /* can be cast to int */      for (rowNb=0 ; rowNb < nbRows ; rowNb++) { @@ -2267,7 +2577,7 @@ static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const  /*! 
ZSTD_reduceIndex() :  *   rescale all indexes to avoid future overflow (indexes are U32) */ -static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue) +static void ZSTD_reduceIndex (ZSTD_MatchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)  {      {   U32 const hSize = (U32)1 << params->cParams.hashLog;          ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); @@ -2294,26 +2604,32 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par  /* See doc/zstd_compression_format.md for detailed format description */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr)  { -    const seqDef* const sequences = seqStorePtr->sequencesStart; +    const SeqDef* const sequences = seqStorePtr->sequencesStart;      BYTE* const llCodeTable = seqStorePtr->llCode;      BYTE* const ofCodeTable = seqStorePtr->ofCode;      BYTE* const mlCodeTable = seqStorePtr->mlCode;      U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);      U32 u; +    int longOffsets = 0;      assert(nbSeq <= seqStorePtr->maxNbSeq);      for (u=0; u<nbSeq; u++) {          U32 const llv = sequences[u].litLength; +        U32 const ofCode = ZSTD_highbit32(sequences[u].offBase);          U32 const mlv = sequences[u].mlBase;          llCodeTable[u] = (BYTE)ZSTD_LLcode(llv); -        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase); +        ofCodeTable[u] = (BYTE)ofCode;          mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv); +        assert(!(MEM_64bits() && ofCode >= STREAM_ACCUMULATOR_MIN)); +        if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN) +            longOffsets = 1;      }      if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)          llCodeTable[seqStorePtr->longLengthPos] = MaxLL;      if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)          mlCodeTable[seqStorePtr->longLengthPos] = MaxML; +    return longOffsets;  }  /* ZSTD_useTargetCBlockSize(): @@ -2333,9 +2649,9 @@ static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)   * Returns 1 if true, 0 otherwise. */  static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)  { -    DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter); -    assert(cctxParams->useBlockSplitter != ZSTD_ps_auto); -    return (cctxParams->useBlockSplitter == ZSTD_ps_enable); +    DEBUGLOG(5, "ZSTD_blockSplitterEnabled (postBlockSplitter=%d)", cctxParams->postBlockSplitter); +    assert(cctxParams->postBlockSplitter != ZSTD_ps_auto); +    return (cctxParams->postBlockSplitter == ZSTD_ps_enable);  }  /* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types @@ -2347,6 +2663,7 @@ typedef struct {      U32 MLtype;      size_t size;      size_t lastCountSize; /* Accounts for bug in 1.3.4. 
More detail in ZSTD_entropyCompressSeqStore_internal() */ +    int longOffsets;  } ZSTD_symbolEncodingTypeStats_t;  /* ZSTD_buildSequencesStatistics(): @@ -2357,11 +2674,13 @@ typedef struct {   * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)   */  static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, -                        const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, -                              BYTE* dst, const BYTE* const dstEnd, -                              ZSTD_strategy strategy, unsigned* countWorkspace, -                              void* entropyWorkspace, size_t entropyWkspSize) { +ZSTD_buildSequencesStatistics( +                const SeqStore_t* seqStorePtr, size_t nbSeq, +                const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, +                      BYTE* dst, const BYTE* const dstEnd, +                      ZSTD_strategy strategy, unsigned* countWorkspace, +                      void* entropyWorkspace, size_t entropyWkspSize) +{      BYTE* const ostart = dst;      const BYTE* const oend = dstEnd;      BYTE* op = ostart; @@ -2375,7 +2694,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,      stats.lastCountSize = 0;      /* convert length/distances into codes */ -    ZSTD_seqToCodes(seqStorePtr); +    stats.longOffsets = ZSTD_seqToCodes(seqStorePtr);      assert(op <= oend);      assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */      /* build CTable for Literal Lengths */ @@ -2392,7 +2711,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,          assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */          {   size_t const countSize = ZSTD_buildCTable(                  op, (size_t)(oend - op), -                CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype, +                CTable_LitLength, LLFSELog, (SymbolEncodingType_e)stats.LLtype,                  countWorkspace, max, llCodeTable, nbSeq,                  LL_defaultNorm, LL_defaultNormLog, MaxLL,                  prevEntropy->litlengthCTable, @@ -2413,7 +2732,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,          size_t const mostFrequent = HIST_countFast_wksp(              countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */          /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ -        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; +        ZSTD_DefaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed;          DEBUGLOG(5, "Building OF table");          nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;          stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, @@ -2424,7 +2743,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,          assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */          {   size_t const countSize = ZSTD_buildCTable(                  op, (size_t)(oend - op), -                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype, +                CTable_OffsetBits, OffFSELog, (SymbolEncodingType_e)stats.Offtype,                  countWorkspace, max, ofCodeTable, nbSeq,                  OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,                  prevEntropy->offcodeCTable, @@ -2454,7 +2773,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,          assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */          {   size_t const countSize = ZSTD_buildCTable(                  op, (size_t)(oend - op), -                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype, +                CTable_MatchLength, MLFSELog, (SymbolEncodingType_e)stats.MLtype,                  countWorkspace, max, mlCodeTable, nbSeq,                  ML_defaultNorm, ML_defaultNormLog, MaxML,                  prevEntropy->matchlengthCTable, @@ -2480,22 +2799,23 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,   */  #define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20  MEM_STATIC size_t -ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, -                          const ZSTD_entropyCTables_t* prevEntropy, -                                ZSTD_entropyCTables_t* nextEntropy, -                          const ZSTD_CCtx_params* cctxParams, -                                void* dst, size_t dstCapacity, -                                void* entropyWorkspace, size_t entropyWkspSize, -                          const int bmi2) +ZSTD_entropyCompressSeqStore_internal( +                              void* dst, size_t dstCapacity, +                        const void* literals, size_t litSize, +                        const SeqStore_t* seqStorePtr, +                        const ZSTD_entropyCTables_t* prevEntropy, +                              ZSTD_entropyCTables_t* nextEntropy, +                        const ZSTD_CCtx_params* cctxParams, +                              void* entropyWorkspace, size_t entropyWkspSize, +                        const int bmi2)  { -    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;      ZSTD_strategy const strategy = cctxParams->cParams.strategy;      unsigned* count = (unsigned*)entropyWorkspace;      FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;      FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;      FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; -    const seqDef* const sequences = seqStorePtr->sequencesStart; -    const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; +    const SeqDef* const sequences = seqStorePtr->sequencesStart; +    const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);      const BYTE* const ofCodeTable = seqStorePtr->ofCode;      const BYTE* const llCodeTable = 
seqStorePtr->llCode;      const BYTE* const mlCodeTable = seqStorePtr->mlCode; @@ -2503,29 +2823,28 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,      BYTE* const oend = ostart + dstCapacity;      BYTE* op = ostart;      size_t lastCountSize; +    int longOffsets = 0;      entropyWorkspace = count + (MaxSeq + 1);      entropyWkspSize -= (MaxSeq + 1) * sizeof(*count); -    DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq); +    DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity);      ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));      assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);      /* Compress literals */ -    {   const BYTE* const literals = seqStorePtr->litStart; -        size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart; -        size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart; +    {   size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);          /* Base suspicion of uncompressibility on ratio of literals to sequences */ -        unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); -        size_t const litSize = (size_t)(seqStorePtr->lit - literals); +        int const suspectUncompressible = (numSequences == 0) || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); +          size_t const cSize = ZSTD_compressLiterals( -                                    &prevEntropy->huf, &nextEntropy->huf, -                                    cctxParams->cParams.strategy, -                                    ZSTD_literalsCompressionIsDisabled(cctxParams),                                      op, dstCapacity,                                      literals, litSize,                                      entropyWorkspace, entropyWkspSize, -                                    bmi2, suspectUncompressible); +                                    &prevEntropy->huf, &nextEntropy->huf, +                                    cctxParams->cParams.strategy, +                                    ZSTD_literalsCompressionIsDisabled(cctxParams), +                                    suspectUncompressible, bmi2);          FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");          assert(cSize <= dstCapacity);          op += cSize; @@ -2551,11 +2870,10 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,          ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));          return (size_t)(op - ostart);      } -    { -        ZSTD_symbolEncodingTypeStats_t stats; -        BYTE* seqHead = op++; +    {   BYTE* const seqHead = op++;          /* build stats for sequences */ -        stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, +        const ZSTD_symbolEncodingTypeStats_t stats = +                ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,                                               &prevEntropy->fse, &nextEntropy->fse,                                                op, oend,                                                strategy, count, @@ -2564,6 +2882,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,          *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));          lastCountSize = stats.lastCountSize;          op += stats.size; +        longOffsets = stats.longOffsets;      }      {   size_t const bitstreamSize = 
ZSTD_encodeSequences( @@ -2597,104 +2916,146 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,      return (size_t)(op - ostart);  } -MEM_STATIC size_t -ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, -                       const ZSTD_entropyCTables_t* prevEntropy, -                             ZSTD_entropyCTables_t* nextEntropy, -                       const ZSTD_CCtx_params* cctxParams, -                             void* dst, size_t dstCapacity, -                             size_t srcSize, -                             void* entropyWorkspace, size_t entropyWkspSize, -                             int bmi2) +static size_t +ZSTD_entropyCompressSeqStore_wExtLitBuffer( +                          void* dst, size_t dstCapacity, +                    const void* literals, size_t litSize, +                          size_t blockSize, +                    const SeqStore_t* seqStorePtr, +                    const ZSTD_entropyCTables_t* prevEntropy, +                          ZSTD_entropyCTables_t* nextEntropy, +                    const ZSTD_CCtx_params* cctxParams, +                          void* entropyWorkspace, size_t entropyWkspSize, +                          int bmi2)  {      size_t const cSize = ZSTD_entropyCompressSeqStore_internal( -                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,                              dst, dstCapacity, +                            literals, litSize, +                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,                             entropyWorkspace, entropyWkspSize, bmi2);      if (cSize == 0) return 0;      /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.       * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.       */ -    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) +    if ((cSize == ERROR(dstSize_tooSmall)) & (blockSize <= dstCapacity)) { +        DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity);          return 0;  /* block not compressed */ +    }      FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");      /* Check compressibility */ -    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); +    {   size_t const maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy);          if (cSize >= maxCSize) return 0;  /* block not compressed */      } -    DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); +    DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); +    /* libzstd decoders v1.5.4 and older are not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly. +     * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above. 
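+     * Concretely: any cSize returned from here satisfies
+     * cSize < blockSize - ZSTD_minGain(blockSize, strategy) <= blockSize <= ZSTD_BLOCKSIZE_MAX,
+     * which is what the assert below relies on.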
+     */ +    assert(cSize < ZSTD_BLOCKSIZE_MAX);      return cSize;  } +static size_t +ZSTD_entropyCompressSeqStore( +                    const SeqStore_t* seqStorePtr, +                    const ZSTD_entropyCTables_t* prevEntropy, +                          ZSTD_entropyCTables_t* nextEntropy, +                    const ZSTD_CCtx_params* cctxParams, +                          void* dst, size_t dstCapacity, +                          size_t srcSize, +                          void* entropyWorkspace, size_t entropyWkspSize, +                          int bmi2) +{ +    return ZSTD_entropyCompressSeqStore_wExtLitBuffer( +                dst, dstCapacity, +                seqStorePtr->litStart, (size_t)(seqStorePtr->lit - seqStorePtr->litStart), +                srcSize, +                seqStorePtr, +                prevEntropy, nextEntropy, +                cctxParams, +                entropyWorkspace, entropyWkspSize, +                bmi2); +} +  /* ZSTD_selectBlockCompressor() :   * Not static, but internal use only (used by long distance matcher)   * assumption : strat is a valid strategy */ -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) +ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)  { -    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { +    static const ZSTD_BlockCompressor_f blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {          { ZSTD_compressBlock_fast  /* default for 0 */,            ZSTD_compressBlock_fast, -          ZSTD_compressBlock_doubleFast, -          ZSTD_compressBlock_greedy, -          ZSTD_compressBlock_lazy, -          ZSTD_compressBlock_lazy2, -          ZSTD_compressBlock_btlazy2, -          ZSTD_compressBlock_btopt, -          ZSTD_compressBlock_btultra, -          ZSTD_compressBlock_btultra2 }, +          ZSTD_COMPRESSBLOCK_DOUBLEFAST, +          ZSTD_COMPRESSBLOCK_GREEDY, +          ZSTD_COMPRESSBLOCK_LAZY, +          ZSTD_COMPRESSBLOCK_LAZY2, +          ZSTD_COMPRESSBLOCK_BTLAZY2, +          ZSTD_COMPRESSBLOCK_BTOPT, +          ZSTD_COMPRESSBLOCK_BTULTRA, +          ZSTD_COMPRESSBLOCK_BTULTRA2 +        },          { ZSTD_compressBlock_fast_extDict  /* default for 0 */,            ZSTD_compressBlock_fast_extDict, -          ZSTD_compressBlock_doubleFast_extDict, -          ZSTD_compressBlock_greedy_extDict, -          ZSTD_compressBlock_lazy_extDict, -          ZSTD_compressBlock_lazy2_extDict, -          ZSTD_compressBlock_btlazy2_extDict, -          ZSTD_compressBlock_btopt_extDict, -          ZSTD_compressBlock_btultra_extDict, -          ZSTD_compressBlock_btultra_extDict }, +          ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT, +          ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT, +          ZSTD_COMPRESSBLOCK_LAZY_EXTDICT, +          ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT, +          ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT, +          ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT, +          ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT, +          ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT +        },          { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,            ZSTD_compressBlock_fast_dictMatchState, -          ZSTD_compressBlock_doubleFast_dictMatchState, -          ZSTD_compressBlock_greedy_dictMatchState, -          ZSTD_compressBlock_lazy_dictMatchState, -          ZSTD_compressBlock_lazy2_dictMatchState, -          ZSTD_compressBlock_btlazy2_dictMatchState, -          
ZSTD_compressBlock_btopt_dictMatchState, -          ZSTD_compressBlock_btultra_dictMatchState, -          ZSTD_compressBlock_btultra_dictMatchState }, +          ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE, +          ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE, +          ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE, +          ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE, +          ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE, +          ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE, +          ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE, +          ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE +        },          { NULL  /* default for 0 */,            NULL,            NULL, -          ZSTD_compressBlock_greedy_dedicatedDictSearch, -          ZSTD_compressBlock_lazy_dedicatedDictSearch, -          ZSTD_compressBlock_lazy2_dedicatedDictSearch, +          ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH, +          ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH, +          ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH,            NULL,            NULL,            NULL,            NULL }      }; -    ZSTD_blockCompressor selectedCompressor; +    ZSTD_BlockCompressor_f selectedCompressor;      ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); -    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); -    DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); +    DEBUGLOG(5, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);      if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) { -        static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = { -            { ZSTD_compressBlock_greedy_row, -            ZSTD_compressBlock_lazy_row, -            ZSTD_compressBlock_lazy2_row }, -            { ZSTD_compressBlock_greedy_extDict_row, -            ZSTD_compressBlock_lazy_extDict_row, -            ZSTD_compressBlock_lazy2_extDict_row }, -            { ZSTD_compressBlock_greedy_dictMatchState_row, -            ZSTD_compressBlock_lazy_dictMatchState_row, -            ZSTD_compressBlock_lazy2_dictMatchState_row }, -            { ZSTD_compressBlock_greedy_dedicatedDictSearch_row, -            ZSTD_compressBlock_lazy_dedicatedDictSearch_row, -            ZSTD_compressBlock_lazy2_dedicatedDictSearch_row } +        static const ZSTD_BlockCompressor_f rowBasedBlockCompressors[4][3] = { +            { +                ZSTD_COMPRESSBLOCK_GREEDY_ROW, +                ZSTD_COMPRESSBLOCK_LAZY_ROW, +                ZSTD_COMPRESSBLOCK_LAZY2_ROW +            }, +            { +                ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW, +                ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW, +                ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW +            }, +            { +                ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW, +                ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW, +                ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW +            }, +            { +                ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW, +                ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW, +                ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW +            }          }; -        DEBUGLOG(4, "Selecting a row-based matchfinder"); +        DEBUGLOG(5, "Selecting a row-based matchfinder");          assert(useRowMatchFinder != ZSTD_ps_auto);          selectedCompressor = 
rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];      } else { @@ -2704,30 +3065,126 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS      return selectedCompressor;  } -static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, +static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr,                                     const BYTE* anchor, size_t lastLLSize)  {      ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);      seqStorePtr->lit += lastLLSize;  } -void ZSTD_resetSeqStore(seqStore_t* ssPtr) +void ZSTD_resetSeqStore(SeqStore_t* ssPtr)  {      ssPtr->lit = ssPtr->litStart;      ssPtr->sequences = ssPtr->sequencesStart;      ssPtr->longLengthType = ZSTD_llt_none;  } -typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; +/* ZSTD_postProcessSequenceProducerResult() : + * Validates and post-processes sequences obtained through the external matchfinder API: + *   - Checks whether nbExternalSeqs represents an error condition. + *   - Appends a block delimiter to outSeqs if one is not already present. + *     See zstd.h for context regarding block delimiters. + * Returns the number of sequences after post-processing, or an error code. */ +static size_t ZSTD_postProcessSequenceProducerResult( +    ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize +) { +    RETURN_ERROR_IF( +        nbExternalSeqs > outSeqsCapacity, +        sequenceProducer_failed, +        "External sequence producer returned error code %lu", +        (unsigned long)nbExternalSeqs +    ); + +    RETURN_ERROR_IF( +        nbExternalSeqs == 0 && srcSize > 0, +        sequenceProducer_failed, +        "Got zero sequences from external sequence producer for a non-empty src buffer!" +    ); + +    if (srcSize == 0) { +        ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence)); +        return 1; +    } + +    { +        ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1]; + +        /* We can return early if lastSeq is already a block delimiter. */ +        if (lastSeq.offset == 0 && lastSeq.matchLength == 0) { +            return nbExternalSeqs; +        } + +        /* This error condition is only possible if the external matchfinder +         * produced an invalid parse, by definition of ZSTD_sequenceBound(). */ +        RETURN_ERROR_IF( +            nbExternalSeqs == outSeqsCapacity, +            sequenceProducer_failed, +            "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!" +        ); + +        /* lastSeq is not a block delimiter, so we need to append one. */ +        ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence)); +        return nbExternalSeqs + 1; +    } +} + +/* ZSTD_fastSequenceLengthSum() : + * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. + * Similar to another function in zstd_compress.c (determine_blockSize), + * except it doesn't check for a block delimiter to end summation. + * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). + * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. 
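+ * Note that the trailing block delimiter (offset == 0, matchLength == 0) is included in the
+ * sum: it contributes zero to matchLenSum, and its litLength is exactly the lastLits term,
+ * so no special case is needed.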
*/ +static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) { +    size_t matchLenSum, litLenSum, i; +    matchLenSum = 0; +    litLenSum = 0; +    for (i = 0; i < seqBufSize; i++) { +        litLenSum += seqBuf[i].litLength; +        matchLenSum += seqBuf[i].matchLength; +    } +    return litLenSum + matchLenSum; +} + +/* + * Function to validate sequences produced by a block compressor. + */ +static void ZSTD_validateSeqStore(const SeqStore_t* seqStore, const ZSTD_compressionParameters* cParams) +{ +#if DEBUGLEVEL >= 1 +    const SeqDef* seq = seqStore->sequencesStart; +    const SeqDef* const seqEnd = seqStore->sequences; +    size_t const matchLenLowerBound = cParams->minMatch == 3 ? 3 : 4; +    for (; seq < seqEnd; ++seq) { +        const ZSTD_SequenceLength seqLength = ZSTD_getSequenceLength(seqStore, seq); +        assert(seqLength.matchLength >= matchLenLowerBound); +        (void)seqLength; +        (void)matchLenLowerBound; +    } +#else +    (void)seqStore; +    (void)cParams; +#endif +} + +static size_t +ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, +                                   ZSTD_SequencePosition* seqPos, +                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, +                             const void* src, size_t blockSize, +                                   ZSTD_ParamSwitch_e externalRepSearch); + +typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_BuildSeqStore_e;  static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)  { -    ZSTD_matchState_t* const ms = &zc->blockState.matchState; +    ZSTD_MatchState_t* const ms = &zc->blockState.matchState;      DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);      assert(srcSize <= ZSTD_BLOCKSIZE_MAX);      /* Assert that we have correctly flushed the ctx params into the ms's copy */      ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); -    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { +    /* TODO: See issue #3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we add +     * an additional 1. We need to revisit this logic and make it more consistent. */
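+    /* Worked arithmetic (values assumed for illustration): with MIN_CBLOCK_SIZE == 2 and
+     * ZSTD_blockHeaderSize == 3, the test below reads srcSize < 7. Inputs that small cannot
+     * possibly beat a raw block plus its header, so they skip compression entirely. */
+    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {          if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {              ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);          } else { @@ -2763,6 +3220,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)          }          if (zc->externSeqStore.pos < zc->externSeqStore.size) {              assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); + +            /* External matchfinder + LDM is technically possible, just not implemented yet. +             * We need to revisit soon and implement it. */ +            RETURN_ERROR_IF( +                ZSTD_hasExtSeqProd(&zc->appliedParams), +                parameter_combination_unsupported, +                "Long-distance matching with external sequence producer enabled is not currently supported." 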
+            ); +              /* Updates ldmSeqStore.pos */              lastLLSize =                  ZSTD_ldm_blockCompress(&zc->externSeqStore, @@ -2772,7 +3238,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)                                         src, srcSize);              assert(zc->externSeqStore.pos <= zc->externSeqStore.size);          } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { -            rawSeqStore_t ldmSeqStore = kNullRawSeqStore; +            RawSeqStore_t ldmSeqStore = kNullRawSeqStore; + +            /* External matchfinder + LDM is technically possible, just not implemented yet. +             * We need to revisit soon and implement it. */ +            RETURN_ERROR_IF( +                ZSTD_hasExtSeqProd(&zc->appliedParams), +                parameter_combination_unsupported, +                "Long-distance matching with external sequence producer enabled is not currently supported." +            );              ldmSeqStore.seq = zc->ldmSequences;              ldmSeqStore.capacity = zc->maxNbLdmSequences; @@ -2788,42 +3262,116 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)                                         zc->appliedParams.useRowMatchFinder,                                         src, srcSize);              assert(ldmSeqStore.pos == ldmSeqStore.size); -        } else {   /* not long range mode */ -            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, -                                                                                    zc->appliedParams.useRowMatchFinder, -                                                                                    dictMode); +        } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) { +            assert( +                zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize) +            ); +            assert(zc->appliedParams.extSeqProdFunc != NULL); + +            {   U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog; + +                size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)( +                    zc->appliedParams.extSeqProdState, +                    zc->extSeqBuf, +                    zc->extSeqBufCapacity, +                    src, srcSize, +                    NULL, 0,  /* dict and dictSize, currently not supported */ +                    zc->appliedParams.compressionLevel, +                    windowSize +                ); + +                size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( +                    zc->extSeqBuf, +                    nbExternalSeqs, +                    zc->extSeqBufCapacity, +                    srcSize +                ); + +                /* Return early if there is no error, since we don't need to worry about last literals */ +                if (!ZSTD_isError(nbPostProcessedSeqs)) { +                    ZSTD_SequencePosition seqPos = {0,0,0}; +                    size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs); +                    RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!"); +                    FORWARD_IF_ERROR( +                        ZSTD_transferSequences_wBlockDelim( +                            zc, &seqPos, +                            zc->extSeqBuf, nbPostProcessedSeqs, +                            src, srcSize, +                   
         zc->appliedParams.searchForExternalRepcodes +                        ), +                        "Failed to copy external sequences to seqStore!" +                    ); +                    ms->ldmSeqStore = NULL; +                    DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs); +                    return ZSTDbss_compress; +                } + +                /* Propagate the error if fallback is disabled */ +                if (!zc->appliedParams.enableMatchFinderFallback) { +                    return nbPostProcessedSeqs; +                } + +                /* Fallback to software matchfinder */ +                {   ZSTD_BlockCompressor_f const blockCompressor = +                        ZSTD_selectBlockCompressor( +                            zc->appliedParams.cParams.strategy, +                            zc->appliedParams.useRowMatchFinder, +                            dictMode); +                    ms->ldmSeqStore = NULL; +                    DEBUGLOG( +                        5, +                        "External sequence producer returned error code %lu. Falling back to internal parser.", +                        (unsigned long)nbExternalSeqs +                    ); +                    lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); +            }   } +        } else {   /* not long range mode and no external matchfinder */ +            ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor( +                    zc->appliedParams.cParams.strategy, +                    zc->appliedParams.useRowMatchFinder, +                    dictMode);              ms->ldmSeqStore = NULL;              lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);          }          {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;              ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);      }   } +    ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams);      return ZSTDbss_compress;  } -static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) +static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const SeqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM])  { -    const seqStore_t* seqStore = ZSTD_getSeqStore(zc); -    const seqDef* seqStoreSeqs = seqStore->sequencesStart; -    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs; -    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart); -    size_t literalsRead = 0; -    size_t lastLLSize; +    const SeqDef* inSeqs = seqStore->sequencesStart; +    const size_t nbInSequences = (size_t)(seqStore->sequences - inSeqs); +    const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart); -    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex]; +    ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? 
seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex; +    const size_t nbOutSequences = nbInSequences + 1; +    size_t nbOutLiterals = 0; +    Repcodes_t repcodes;      size_t i; -    repcodes_t updatedRepcodes; - -    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences); -    /* Ensure we have enough space for last literals "sequence" */ -    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1); -    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); -    for (i = 0; i < seqStoreSeqSize; ++i) { -        U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM; -        outSeqs[i].litLength = seqStoreSeqs[i].litLength; -        outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH; + +    /* Bounds check that we have enough space for every input sequence +     * and the block delimiter +     */ +    assert(seqCollector->seqIndex <= seqCollector->maxSequences); +    RETURN_ERROR_IF( +        nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex), +        dstSize_tooSmall, +        "Not enough space to copy sequences"); + +    ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes)); +    for (i = 0; i < nbInSequences; ++i) { +        U32 rawOffset; +        outSeqs[i].litLength = inSeqs[i].litLength; +        outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH;          outSeqs[i].rep = 0; +        /* Handle the possible single length >= 64K +         * There can only be one because we add MINMATCH to every match length, +         * and blocks are at most 128K. +         */          if (i == seqStore->longLengthPos) {              if (seqStore->longLengthType == ZSTD_llt_literalLength) {                  outSeqs[i].litLength += 0x10000; @@ -2832,46 +3380,75 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)              }          } -        if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) { -            /* Derive the correct offset corresponding to a repcode */ -            outSeqs[i].rep = seqStoreSeqs[i].offBase; +        /* Determine the raw offset given the offBase, which may be a repcode. 
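+         * Worked example with a hypothetical repcode history rep == {100, 200, 300}:
+         * repcode 1 with litLength != 0 resolves to rep[0] == 100; with litLength == 0,
+         * repcode 1 resolves to rep[1] == 200, repcode 2 to rep[2] == 300, and the special
+         * repcode 3 resolves to rep[0] - 1 == 99.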
*/ +        if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) { +            const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase); +            assert(repcode > 0); +            outSeqs[i].rep = repcode;              if (outSeqs[i].litLength != 0) { -                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1]; +                rawOffset = repcodes.rep[repcode - 1];              } else { -                if (outSeqs[i].rep == 3) { -                    rawOffset = updatedRepcodes.rep[0] - 1; +                if (repcode == 3) { +                    assert(repcodes.rep[0] > 1); +                    rawOffset = repcodes.rep[0] - 1;                  } else { -                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep]; +                    rawOffset = repcodes.rep[repcode];                  }              } +        } else { +            rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase);          }          outSeqs[i].offset = rawOffset; -        /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode -           so we provide seqStoreSeqs[i].offset - 1 */ -        ZSTD_updateRep(updatedRepcodes.rep, -                       seqStoreSeqs[i].offBase - 1, -                       seqStoreSeqs[i].litLength == 0); -        literalsRead += outSeqs[i].litLength; + +        /* Update repcode history for the sequence */ +        ZSTD_updateRep(repcodes.rep, +                       inSeqs[i].offBase, +                       inSeqs[i].litLength == 0); + +        nbOutLiterals += outSeqs[i].litLength;      }      /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.       * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker       * for the block boundary, according to the API.       */ -    assert(seqStoreLiteralsSize >= literalsRead); -    lastLLSize = seqStoreLiteralsSize - literalsRead; -    outSeqs[i].litLength = (U32)lastLLSize; -    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0; -    seqStoreSeqSize++; -    zc->seqCollector.seqIndex += seqStoreSeqSize; +    assert(nbInLiterals >= nbOutLiterals); +    { +        const size_t lastLLSize = nbInLiterals - nbOutLiterals; +        outSeqs[nbInSequences].litLength = (U32)lastLLSize; +        outSeqs[nbInSequences].matchLength = 0; +        outSeqs[nbInSequences].offset = 0; +        assert(nbOutSequences == nbInSequences + 1); +    } +    seqCollector->seqIndex += nbOutSequences; +    assert(seqCollector->seqIndex <= seqCollector->maxSequences); + +    return 0; +} + +size_t ZSTD_sequenceBound(size_t srcSize) { +    const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1; +    const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1; +    return maxNbSeq + maxNbDelims;  }  size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,                                size_t outSeqsSize, const void* src, size_t srcSize)  {      const size_t dstCapacity = ZSTD_compressBound(srcSize); -    void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); +    void* dst; /* Make C90 happy. 
*/      SeqCollector seqCollector; +    { +        int targetCBlockSize; +        FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), ""); +        RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0"); +    } +    { +        int nbWorkers; +        FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), ""); +        RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0"); +    } +    dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);      RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");      seqCollector.collectSequences = 1; @@ -2880,8 +3457,12 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,      seqCollector.maxSequences = outSeqsSize;      zc->seqCollector = seqCollector; -    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); -    ZSTD_customFree(dst, ZSTD_defaultCMem); +    { +        const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); +        ZSTD_customFree(dst, ZSTD_defaultCMem); +        FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed"); +    } +    assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize));      return zc->seqCollector.seqIndex;  } @@ -2910,19 +3491,17 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {      const size_t unrollMask = unrollSize - 1;      const size_t prefixLength = length & unrollMask;      size_t i; -    size_t u;      if (length == 1) return 1;      /* Check if prefix is RLE first before using unrolled loop */      if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {          return 0;      }      for (i = prefixLength; i != length; i += unrollSize) { +        size_t u;          for (u = 0; u < unrollSize; u += sizeof(size_t)) {              if (MEM_readST(ip + i + u) != valueST) {                  return 0; -            } -        } -    } +    }   }   }      return 1;  } @@ -2930,7 +3509,7 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {   * This is just a heuristic based on the compressibility.   * It may return both false positives and false negatives.   */ -static int ZSTD_maybeRLE(seqStore_t const* seqStore) +static int ZSTD_maybeRLE(SeqStore_t const* seqStore)  {      size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);      size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart); @@ -2938,7 +3517,8 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore)      return nbSeqs < 4 && nbLits < 10;  } -static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) +static void +ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)  {      ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;      bs->prevCBlock = bs->nextCBlock; @@ -2946,12 +3526,14 @@ static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* c  }  /* Writes the block header */ -static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) { +static void +writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) +{      U32 const cBlockHeader = cSize == 1 ?                          
lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :                          lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);      MEM_writeLE24(op, cBlockHeader); -    DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock); +    DEBUGLOG(5, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);  }  /* ZSTD_buildBlockEntropyStats_literals() : @@ -2959,13 +3541,16 @@ static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastB   *  Stores literals block type (raw, rle, compressed, repeat) and   *  huffman description table to hufMetadata.   *  Requires ENTROPY_WORKSPACE_SIZE workspace - *  @return : size of huffman description table or error code */ -static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, -                                            const ZSTD_hufCTables_t* prevHuf, -                                                  ZSTD_hufCTables_t* nextHuf, -                                                  ZSTD_hufCTablesMetadata_t* hufMetadata, -                                                  const int literalsCompressionIsDisabled, -                                                  void* workspace, size_t wkspSize) + * @return : size of huffman description table, or an error code + */ +static size_t +ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, +                               const ZSTD_hufCTables_t* prevHuf, +                                     ZSTD_hufCTables_t* nextHuf, +                                     ZSTD_hufCTablesMetadata_t* hufMetadata, +                               const int literalsCompressionIsDisabled, +                                     void* workspace, size_t wkspSize, +                                     int hufFlags)  {      BYTE* const wkspStart = (BYTE*)workspace;      BYTE* const wkspEnd = wkspStart + wkspSize; @@ -2973,9 +3558,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi      unsigned* const countWksp = (unsigned*)workspace;      const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);      BYTE* const nodeWksp = countWkspStart + countWkspSize; -    const size_t nodeWkspSize = wkspEnd-nodeWksp; +    const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp);      unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; -    unsigned huffLog = HUF_TABLELOG_DEFAULT; +    unsigned huffLog = LitHufLog;      HUF_repeat repeat = prevHuf->repeatMode;      DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize); @@ -2990,73 +3575,77 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi      /* small ? don't even attempt compression (speed opt) */  #ifndef COMPRESS_LITERALS_SIZE_MIN -#define COMPRESS_LITERALS_SIZE_MIN 63 +# define COMPRESS_LITERALS_SIZE_MIN 63  /* heuristic */  #endif      {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 
6 : COMPRESS_LITERALS_SIZE_MIN;          if (srcSize <= minLitSize) {              DEBUGLOG(5, "set_basic - too small");              hufMetadata->hType = set_basic;              return 0; -        } -    } +    }   }      /* Scan input and build symbol stats */ -    {   size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize); +    {   size_t const largest = +            HIST_count_wksp (countWksp, &maxSymbolValue, +                            (const BYTE*)src, srcSize, +                            workspace, wkspSize);          FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");          if (largest == srcSize) { +            /* only one literal symbol */              DEBUGLOG(5, "set_rle");              hufMetadata->hType = set_rle;              return 0;          }          if (largest <= (srcSize >> 7)+4) { +            /* heuristic: likely not compressible */              DEBUGLOG(5, "set_basic - no gain");              hufMetadata->hType = set_basic;              return 0; -        } -    } +    }   }      /* Validate the previous Huffman table */ -    if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { +    if (repeat == HUF_repeat_check +      && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {          repeat = HUF_repeat_none;      }      /* Build Huffman Tree */      ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); -    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags); +    assert(huffLog <= LitHufLog);      {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,                                                      maxSymbolValue, huffLog,                                                      nodeWksp, nodeWkspSize);          FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");          huffLog = (U32)maxBits; -        {   /* Build and write the CTable */ -            size_t const newCSize = HUF_estimateCompressedSize( -                    (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); -            size_t const hSize = HUF_writeCTable_wksp( -                    hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), -                    (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, -                    nodeWksp, nodeWkspSize); -            /* Check against repeating the previous CTable */ -            if (repeat != HUF_repeat_none) { -                size_t const oldCSize = HUF_estimateCompressedSize( -                        (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); -                if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { -                    DEBUGLOG(5, "set_repeat - smaller"); -                    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); -                    hufMetadata->hType = set_repeat; -                    return 0; -                } -            } -            if (newCSize + hSize >= srcSize) { -                DEBUGLOG(5, "set_basic - no gains"); +    } +    {   /* Build and write the CTable */ +        size_t const newCSize = HUF_estimateCompressedSize( +                (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); +        size_t const hSize = HUF_writeCTable_wksp( +                hufMetadata->hufDesBuffer, 
sizeof(hufMetadata->hufDesBuffer), +                (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, +                nodeWksp, nodeWkspSize); +        /* Check against repeating the previous CTable */ +        if (repeat != HUF_repeat_none) { +            size_t const oldCSize = HUF_estimateCompressedSize( +                    (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); +            if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { +                DEBUGLOG(5, "set_repeat - smaller");                  ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); -                hufMetadata->hType = set_basic; +                hufMetadata->hType = set_repeat;                  return 0; -            } -            DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); -            hufMetadata->hType = set_compressed; -            nextHuf->repeatMode = HUF_repeat_check; -            return hSize; +        }   } +        if (newCSize + hSize >= srcSize) { +            DEBUGLOG(5, "set_basic - no gains"); +            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); +            hufMetadata->hType = set_basic; +            return 0;          } +        DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); +        hufMetadata->hType = set_compressed; +        nextHuf->repeatMode = HUF_repeat_check; +        return hSize;      }  } @@ -3066,8 +3655,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi   * and updates nextEntropy to the appropriate repeatMode.   */  static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { -    ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0}; +ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) +{ +    ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0};      nextEntropy->litlength_repeatMode = FSE_repeat_none;      nextEntropy->offcode_repeatMode = FSE_repeat_none;      nextEntropy->matchlength_repeatMode = FSE_repeat_none; @@ -3078,16 +3668,18 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {   *  Builds entropy for the sequences.   *  Stores symbol compression modes and fse table to fseMetadata.   *  Requires ENTROPY_WORKSPACE_SIZE wksp. 
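
The literals path above boils down to a four-way choice. A condensed, standalone restatement, with thresholds taken from the code; the local enum and the negative-oldCSize convention for "no reusable previous table" are simplifications for illustration:

    #include <stddef.h>
    #include <stdio.h>

    typedef enum { set_basic, set_rle, set_compressed, set_repeat } litType_e;

    static litType_e chooseLitType(size_t srcSize, size_t largestCount,
                                   size_t hSize, size_t newCSize, long oldCSize)
    {
        if (srcSize <= 63) return set_basic;          /* too small (real code: 6 if prev table valid) */
        if (largestCount == srcSize) return set_rle;  /* a single symbol */
        if (largestCount <= (srcSize >> 7) + 4)
            return set_basic;                         /* flat histogram: likely incompressible */
        if (oldCSize >= 0 && (size_t)oldCSize < srcSize
            && ((size_t)oldCSize <= hSize + newCSize || hSize + 12 >= srcSize))
            return set_repeat;                        /* reusing the previous table wins */
        if (newCSize + hSize >= srcSize) return set_basic;  /* no net gain */
        return set_compressed;
    }

    int main(void)
    {
        printf("%d\n", (int)chooseLitType(1000, 200, 50, 700, -1)); /* set_compressed */
        return 0;
    }
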
- *  @return : size of fse tables or error code */ -static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, -                                              const ZSTD_fseCTables_t* prevEntropy, -                                                    ZSTD_fseCTables_t* nextEntropy, -                                              const ZSTD_CCtx_params* cctxParams, -                                                    ZSTD_fseCTablesMetadata_t* fseMetadata, -                                                    void* workspace, size_t wkspSize) + * @return : size of fse tables or error code */ +static size_t +ZSTD_buildBlockEntropyStats_sequences( +                const SeqStore_t* seqStorePtr, +                const ZSTD_fseCTables_t* prevEntropy, +                      ZSTD_fseCTables_t* nextEntropy, +                const ZSTD_CCtx_params* cctxParams, +                      ZSTD_fseCTablesMetadata_t* fseMetadata, +                      void* workspace, size_t wkspSize)  {      ZSTD_strategy const strategy = cctxParams->cParams.strategy; -    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; +    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);      BYTE* const ostart = fseMetadata->fseTablesBuffer;      BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);      BYTE* op = ostart; @@ -3103,9 +3695,9 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,                                            entropyWorkspace, entropyWorkspaceSize)                         : ZSTD_buildDummySequencesStatistics(nextEntropy);      FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); -    fseMetadata->llType = (symbolEncodingType_e) stats.LLtype; -    fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype; -    fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype; +    fseMetadata->llType = (SymbolEncodingType_e) stats.LLtype; +    fseMetadata->ofType = (SymbolEncodingType_e) stats.Offtype; +    fseMetadata->mlType = (SymbolEncodingType_e) stats.MLtype;      fseMetadata->lastCountSize = stats.lastCountSize;      return stats.size;  } @@ -3114,23 +3706,28 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,  /* ZSTD_buildBlockEntropyStats() :   *  Builds entropy for the block.   
*  Requires workspace size ENTROPY_WORKSPACE_SIZE - * - *  @return : 0 on success or error code + * @return : 0 on success, or an error code + *  Note : also employed in superblock   */ -size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, -                             const ZSTD_entropyCTables_t* prevEntropy, -                                   ZSTD_entropyCTables_t* nextEntropy, -                             const ZSTD_CCtx_params* cctxParams, -                                   ZSTD_entropyCTablesMetadata_t* entropyMetadata, -                                   void* workspace, size_t wkspSize) -{ -    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart; +size_t ZSTD_buildBlockEntropyStats( +            const SeqStore_t* seqStorePtr, +            const ZSTD_entropyCTables_t* prevEntropy, +                  ZSTD_entropyCTables_t* nextEntropy, +            const ZSTD_CCtx_params* cctxParams, +                  ZSTD_entropyCTablesMetadata_t* entropyMetadata, +                  void* workspace, size_t wkspSize) +{ +    size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); +    int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD); +    int const hufFlags = huf_useOptDepth ? HUF_flags_optimalDepth : 0; +      entropyMetadata->hufMetadata.hufDesSize =          ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,                                              &prevEntropy->huf, &nextEntropy->huf,                                              &entropyMetadata->hufMetadata,                                              ZSTD_literalsCompressionIsDisabled(cctxParams), -                                            workspace, wkspSize); +                                            workspace, wkspSize, hufFlags); +      FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");      entropyMetadata->fseMetadata.fseTablesSize =          ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, @@ -3143,11 +3740,12 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,  }  /* Returns the size estimate for the literals section (header + content) of a block */ -static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, -                                                const ZSTD_hufCTables_t* huf, -                                                const ZSTD_hufCTablesMetadata_t* hufMetadata, -                                                void* workspace, size_t wkspSize, -                                                int writeEntropy) +static size_t +ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, +                               const ZSTD_hufCTables_t* huf, +                               const ZSTD_hufCTablesMetadata_t* hufMetadata, +                               void* workspace, size_t wkspSize, +                               int writeEntropy)  {      unsigned* const countWksp = (unsigned*)workspace;      unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; @@ -3169,12 +3767,13 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz  }  /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ -static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, -                        const BYTE* codeTable, size_t nbSeq, unsigned maxCode, -                        const FSE_CTable* fseCTable, -                        const U8* additionalBits, -                      
  short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, -                        void* workspace, size_t wkspSize) +static size_t +ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type, +                    const BYTE* codeTable, size_t nbSeq, unsigned maxCode, +                    const FSE_CTable* fseCTable, +                    const U8* additionalBits, +                    short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, +                    void* workspace, size_t wkspSize)  {      unsigned* const countWksp = (unsigned*)workspace;      const BYTE* ctp = codeTable; @@ -3206,116 +3805,121 @@ static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,  }  /* Returns the size estimate for the sequences section (header + content) of a block */ -static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, -                                                  const BYTE* llCodeTable, -                                                  const BYTE* mlCodeTable, -                                                  size_t nbSeq, -                                                  const ZSTD_fseCTables_t* fseTables, -                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata, -                                                  void* workspace, size_t wkspSize, -                                                  int writeEntropy) +static size_t +ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, +                                 const BYTE* llCodeTable, +                                 const BYTE* mlCodeTable, +                                 size_t nbSeq, +                                 const ZSTD_fseCTables_t* fseTables, +                                 const ZSTD_fseCTablesMetadata_t* fseMetadata, +                                 void* workspace, size_t wkspSize, +                                 int writeEntropy)  {      size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);      size_t cSeqSizeEstimate = 0;      cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff, -                                         fseTables->offcodeCTable, NULL, -                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, -                                         workspace, wkspSize); +                                    fseTables->offcodeCTable, NULL, +                                    OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, +                                    workspace, wkspSize);      cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL, -                                         fseTables->litlengthCTable, LL_bits, -                                         LL_defaultNorm, LL_defaultNormLog, MaxLL, -                                         workspace, wkspSize); +                                    fseTables->litlengthCTable, LL_bits, +                                    LL_defaultNorm, LL_defaultNormLog, MaxLL, +                                    workspace, wkspSize);      cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML, -                                         fseTables->matchlengthCTable, ML_bits, -                                         ML_defaultNorm, ML_defaultNormLog, MaxML, -                                         workspace, wkspSize); +                       
             fseTables->matchlengthCTable, ML_bits, +                                    ML_defaultNorm, ML_defaultNormLog, MaxML, +                                    workspace, wkspSize);      if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;      return cSeqSizeEstimate + sequencesSectionHeaderSize;  }  /* Returns the size estimate for a given stream of literals, of, ll, ml */ -static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, -                                     const BYTE* ofCodeTable, -                                     const BYTE* llCodeTable, -                                     const BYTE* mlCodeTable, -                                     size_t nbSeq, -                                     const ZSTD_entropyCTables_t* entropy, -                                     const ZSTD_entropyCTablesMetadata_t* entropyMetadata, -                                     void* workspace, size_t wkspSize, -                                     int writeLitEntropy, int writeSeqEntropy) { +static size_t +ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, +                       const BYTE* ofCodeTable, +                       const BYTE* llCodeTable, +                       const BYTE* mlCodeTable, +                       size_t nbSeq, +                       const ZSTD_entropyCTables_t* entropy, +                       const ZSTD_entropyCTablesMetadata_t* entropyMetadata, +                       void* workspace, size_t wkspSize, +                       int writeLitEntropy, int writeSeqEntropy) +{      size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, -                                                         &entropy->huf, &entropyMetadata->hufMetadata, -                                                         workspace, wkspSize, writeLitEntropy); +                                    &entropy->huf, &entropyMetadata->hufMetadata, +                                    workspace, wkspSize, writeLitEntropy);      size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, -                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, -                                                         workspace, wkspSize, writeSeqEntropy); +                                    nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, +                                    workspace, wkspSize, writeSeqEntropy);      return seqSize + literalsSize + ZSTD_blockHeaderSize;  }  /* Builds entropy statistics and uses them for blocksize estimation.   * - * Returns the estimated compressed size of the seqStore, or a zstd error. + * @return: estimated compressed size of the seqStore, or a zstd error.   
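
The sequences-section header arithmetic used above can be checked in isolation: one byte for the symbol-compression-modes byte, plus 1, 2 or 3 bytes to encode nbSeq. This sketch assumes LONGNBSEQ keeps its usual zstd value of 0x7F00:

    #include <assert.h>
    #include <stdio.h>

    #define LONGNBSEQ 0x7F00  /* threshold for the 3-byte nbSeq encoding */

    /* Matches the "1 + 1 + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ)" estimate above. */
    static size_t seqSectionHeaderSize(size_t nbSeq)
    {
        return 1 + 1 + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
    }

    int main(void)
    {
        assert(seqSectionHeaderSize(100)    == 2);  /* 1-byte nbSeq */
        assert(seqSectionHeaderSize(1000)   == 3);  /* 2-byte nbSeq */
        assert(seqSectionHeaderSize(0x8000) == 4);  /* 3-byte nbSeq */
        printf("ok\n");
        return 0;
    }
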
*/ -static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) { -    ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; +static size_t +ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx* zc) +{ +    ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata;      DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");      FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,                      &zc->blockState.prevCBlock->entropy,                      &zc->blockState.nextCBlock->entropy,                      &zc->appliedParams,                      entropyMetadata, -                    zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); -    return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), +                    zc->tmpWorkspace, zc->tmpWkspSize), ""); +    return ZSTD_estimateBlockSize( +                    seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),                      seqStore->ofCode, seqStore->llCode, seqStore->mlCode,                      (size_t)(seqStore->sequences - seqStore->sequencesStart), -                    &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, +                    &zc->blockState.nextCBlock->entropy, +                    entropyMetadata, +                    zc->tmpWorkspace, zc->tmpWkspSize,                      (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);  }  /* Returns literals bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) { +static size_t ZSTD_countSeqStoreLiteralsBytes(const SeqStore_t* const seqStore) +{      size_t literalsBytes = 0; -    size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; +    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);      size_t i;      for (i = 0; i < nbSeqs; ++i) { -        seqDef seq = seqStore->sequencesStart[i]; +        SeqDef const seq = seqStore->sequencesStart[i];          literalsBytes += seq.litLength;          if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {              literalsBytes += 0x10000; -        } -    } +    }   }      return literalsBytes;  }  /* Returns match bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) { +static size_t ZSTD_countSeqStoreMatchBytes(const SeqStore_t* const seqStore) +{      size_t matchBytes = 0; -    size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; +    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);      size_t i;      for (i = 0; i < nbSeqs; ++i) { -        seqDef seq = seqStore->sequencesStart[i]; +        SeqDef seq = seqStore->sequencesStart[i];          matchBytes += seq.mlBase + MINMATCH;          if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {              matchBytes += 0x10000; -        } -    } +    }   }      return matchBytes;  }  /* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).   * Stores the result in resultSeqStore.   
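
Both counters above add back 0x10000 for the single flagged sequence whose 16-bit length field overflowed. A minimal sketch of the same accounting, with MiniSeq standing in for SeqDef:

    #include <stdio.h>

    typedef unsigned U32;
    enum { MINMATCH = 3 };

    /* Lengths as stored in the seqStore: mlBase is biased by MINMATCH,
       and at most one entry per block is flagged as "long". */
    typedef struct { U32 litLength; U32 mlBase; } MiniSeq;

    static size_t countedBytes(const MiniSeq* seqs, size_t nbSeqs,
                               size_t longMatchPos /* == nbSeqs if none */)
    {
        size_t total = 0, i;
        for (i = 0; i < nbSeqs; ++i) {
            total += seqs[i].litLength + seqs[i].mlBase + MINMATCH;
            if (i == longMatchPos) total += 0x10000;  /* restore truncated bit */
        }
        return total;
    }

    int main(void)
    {
        MiniSeq const seqs[2] = { {10, 5}, {0, 2} };
        printf("%zu bytes\n", countedBytes(seqs, 2, 2)); /* (10+8) + (0+5) = 23 */
        return 0;
    }
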
*/ -static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, -                               const seqStore_t* originalSeqStore, -                                     size_t startIdx, size_t endIdx) { -    BYTE* const litEnd = originalSeqStore->lit; -    size_t literalsBytes; -    size_t literalsBytesPreceding = 0; - +static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore, +                               const SeqStore_t* originalSeqStore, +                                     size_t startIdx, size_t endIdx) +{      *resultSeqStore = *originalSeqStore;      if (startIdx > 0) {          resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx; -        literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); +        resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);      }      /* Move longLengthPos into the correct position if necessary */ @@ -3328,13 +3932,12 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,      }      resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;      resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx; -    literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); -    resultSeqStore->litStart += literalsBytesPreceding;      if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {          /* This accounts for possible last literals if the derived chunk reaches the end of the block */ -        resultSeqStore->lit = litEnd; +        assert(resultSeqStore->lit == originalSeqStore->lit);      } else { -        resultSeqStore->lit = resultSeqStore->litStart+literalsBytes; +        size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); +        resultSeqStore->lit = resultSeqStore->litStart + literalsBytes;      }      resultSeqStore->llCode += startIdx;      resultSeqStore->mlCode += startIdx; @@ -3342,20 +3945,26 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,  }  /* - * Returns the raw offset represented by the combination of offCode, ll0, and repcode history. - * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq(). + * Returns the raw offset represented by the combination of offBase, ll0, and repcode history. + * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq().   */  static U32 -ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0) -{ -    U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0;  /* [ 0 - 3 ] */ -    assert(STORED_IS_REPCODE(offCode)); -    if (adjustedOffCode == ZSTD_REP_NUM) { -        /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */ -        assert(rep[0] > 0); +ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0) +{ +    U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0;  /* [ 0 - 3 ] */ +    assert(OFFBASE_IS_REPCODE(offBase)); +    if (adjustedRepCode == ZSTD_REP_NUM) { +        assert(ll0); +        /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 +         * This is only valid if it results in a valid offset value, aka > 0. +         * Note : it may happen that `rep[0]==1` in exceptional circumstances. +         * In which case this function will return 0, which is an invalid offset. 
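         * Worked example, assuming history rep = {8, 4, 2}:
         * repcode 3 with litLength == 0 gives adjustedRepCode = 3 - 1 + 1 = 3,
         * which equals ZSTD_REP_NUM, so the resolved offset is rep[0] - 1 == 7;
         * repcode 2 with litLength > 0 gives adjustedRepCode = 1 and reads rep[1] == 4.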
+         * It's not an issue though, since this value will be +         * compared and discarded within ZSTD_seqStore_resolveOffCodes(). +         */          return rep[0] - 1;      } -    return rep[adjustedOffCode]; +    return rep[adjustedRepCode];  }  /* @@ -3371,30 +3980,33 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, c   *        1-3 : repcode 1-3   *        4+ : real_offset+3   */ -static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, -                                          seqStore_t* const seqStore, U32 const nbSeq) { +static void +ZSTD_seqStore_resolveOffCodes(Repcodes_t* const dRepcodes, Repcodes_t* const cRepcodes, +                        const SeqStore_t* const seqStore, U32 const nbSeq) +{      U32 idx = 0; +    U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq;      for (; idx < nbSeq; ++idx) { -        seqDef* const seq = seqStore->sequencesStart + idx; -        U32 const ll0 = (seq->litLength == 0); -        U32 const offCode = OFFBASE_TO_STORED(seq->offBase); -        assert(seq->offBase > 0); -        if (STORED_IS_REPCODE(offCode)) { -            U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0); -            U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0); +        SeqDef* const seq = seqStore->sequencesStart + idx; +        U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx); +        U32 const offBase = seq->offBase; +        assert(offBase > 0); +        if (OFFBASE_IS_REPCODE(offBase)) { +            U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); +            U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0);              /* Adjust simulated decompression repcode history if we come across a mismatch. Replace               * the repcode with the offset it actually references, determined by the compression               * repcode history.               */              if (dRawOffset != cRawOffset) { -                seq->offBase = cRawOffset + ZSTD_REP_NUM; +                seq->offBase = OFFSET_TO_OFFBASE(cRawOffset);              }          }          /* Compression repcode history is always updated with values directly from the unmodified seqStore.           * Decompression repcode history may use modified seq->offset value taken from compression repcode history.           */ -        ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0); -        ZSTD_updateRep(cRepcodes->rep, offCode, ll0); +        ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0); +        ZSTD_updateRep(cRepcodes->rep, offBase, ll0);      }  } @@ -3404,10 +4016,11 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_   * Returns the total size of that block (including header) or a ZSTD error code.   
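
Both histories above are advanced through ZSTD_updateRep(). A simplified in-place sketch of the update rule, reconstructed from how it is invoked in this diff (real offsets push onto the 3-slot history; repcode k, adjusted by ll0, moves the selected entry to the front):

    #include <assert.h>

    typedef unsigned U32;
    #define ZSTD_REP_NUM 3

    static void updateRep(U32 rep[ZSTD_REP_NUM], U32 offBase, U32 ll0)
    {
        if (offBase > ZSTD_REP_NUM) {            /* real offset: push onto history */
            rep[2] = rep[1]; rep[1] = rep[0];
            rep[0] = offBase - ZSTD_REP_NUM;
        } else {
            U32 const idx = offBase - 1 + ll0;   /* ll0 shifts the repcode meaning */
            if (idx > 0) {                       /* idx == 0: history unchanged */
                U32 const cur = (idx == ZSTD_REP_NUM) ? rep[0] - 1 : rep[idx];
                if (idx >= 2) rep[2] = rep[1];
                rep[1] = rep[0];
                rep[0] = cur;
            }
        }
    }

    int main(void)
    {
        U32 rep[3] = { 8, 4, 2 };
        updateRep(rep, 2, 0);        /* repcode 2 swaps the first two entries */
        assert(rep[0] == 4 && rep[1] == 8 && rep[2] == 2);
        updateRep(rep, 3 + 20, 0);   /* real offset 20 pushes the history */
        assert(rep[0] == 20 && rep[1] == 4 && rep[2] == 8);
        return 0;
    }
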
*/  static size_t -ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, -                                  repcodes_t* const dRep, repcodes_t* const cRep, +ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, +                            const SeqStore_t* const seqStore, +                                  Repcodes_t* const dRep, Repcodes_t* const cRep,                                    void* dst, size_t dstCapacity, -                                  const void* src, size_t srcSize, +                            const void* src, size_t srcSize,                                    U32 lastBlock, U32 isPartition)  {      const U32 rleMaxLength = 25; @@ -3417,7 +4030,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,      size_t cSeqsSize;      /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ -    repcodes_t const dRepOriginal = *dRep; +    Repcodes_t const dRepOriginal = *dRep;      DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");      if (isPartition)          ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart)); @@ -3428,7 +4041,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,                  &zc->appliedParams,                  op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,                  srcSize, -                zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, +                zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,                  zc->bmi2);      FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!"); @@ -3442,8 +4055,9 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,          cSeqsSize = 1;      } +    /* Sequence collection not supported when block splitting */      if (zc->seqCollector.collectSequences) { -        ZSTD_copyBlockSequences(zc); +        FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed");          ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);          return 0;      } @@ -3451,18 +4065,18 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,      if (cSeqsSize == 0) {          cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);          FORWARD_IF_ERROR(cSize, "Nocompress block failed"); -        DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize); +        DEBUGLOG(5, "Writing out nocompress block, size: %zu", cSize);          *dRep = dRepOriginal; /* reset simulated decompression repcode history */      } else if (cSeqsSize == 1) {          cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);          FORWARD_IF_ERROR(cSize, "RLE compress block failed"); -        DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize); +        DEBUGLOG(5, "Writing out RLE block, size: %zu", cSize);          *dRep = dRepOriginal; /* reset simulated decompression repcode history */      } else {          ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);          writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);          cSize = ZSTD_blockHeaderSize + cSeqsSize; -        DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize); +        DEBUGLOG(5, "Writing out compressed block, size: %zu", cSize);      }      if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == 
FSE_repeat_valid) @@ -3481,45 +4095,49 @@ typedef struct {  /* Helper function to perform the recursive search for block splits.   * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. - * If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then - * we do not recurse. + * If advantageous to split, then we recurse down the two sub-blocks. + * If not, or if an error occurred in estimation, then we do not recurse.   * - * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING. + * Note: The recursion depth is capped by a heuristic minimum number of sequences, + * defined by MIN_SEQUENCES_BLOCK_SPLITTING.   * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).   * In practice, recursion depth usually doesn't go beyond 4.   * - * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize + * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. + * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize   * maximum of 128 KB, this value is actually impossible to reach.   */  static void  ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, -                             ZSTD_CCtx* zc, const seqStore_t* origSeqStore) +                             ZSTD_CCtx* zc, const SeqStore_t* origSeqStore)  { -    seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; -    seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; -    seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; +    SeqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; +    SeqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; +    SeqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;      size_t estimatedOriginalSize;      size_t estimatedFirstHalfSize;      size_t estimatedSecondHalfSize;      size_t midIdx = (startIdx + endIdx)/2; +    DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); +    assert(endIdx >= startIdx);      if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { -        DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences"); +        DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx);          return;      } -    DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);      ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);      ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);      ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);      estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);      estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);      estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); -    DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", +    DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",               
estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);      if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {          return;      }      if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) { +        DEBUGLOG(5, "split decided at seqNb:%zu", midIdx);          ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);          splits->splitLocations[splits->idx] = (U32)midIdx;          splits->idx++; @@ -3527,14 +4145,18 @@ ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t end      }  } -/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio. +/* Base recursive function. + * Populates a table with intra-block partition indices that can improve compression ratio.   * - * Returns the number of splits made (which equals the size of the partition table - 1). + * @return: number of splits made (which equals the size of the partition table - 1).   */ -static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) { -    seqStoreSplits splits = {partitions, 0}; +static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) +{ +    seqStoreSplits splits; +    splits.splitLocations = partitions; +    splits.idx = 0;      if (nbSeq <= 4) { -        DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split"); +        DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq);          /* Refuse to try and split anything with less than 4 sequences */          return 0;      } @@ -3550,18 +4172,20 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)   * Returns combined size of all blocks (which includes headers), or a ZSTD error code.   */  static size_t -ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, -                                       const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) +ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, +                                    void* dst, size_t dstCapacity, +                              const void* src, size_t blockSize, +                                    U32 lastBlock, U32 nbSeq)  {      size_t cSize = 0;      const BYTE* ip = (const BYTE*)src;      BYTE* op = (BYTE*)dst;      size_t i = 0;      size_t srcBytesTotal = 0; -    U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ -    seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore; -    seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; -    size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); +    U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ +    SeqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore; +    SeqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore; +    size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);      /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history       * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two @@ -3577,36 +4201,37 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac       *       * See ZSTD_seqStore_resolveOffCodes() for more details.       
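
The shape of ZSTD_deriveBlockSplitsHelper() above can be exercised with a toy cost model; "max symbol cost times length plus a 3-byte header" is an assumption chosen for illustration, not zstd's entropy-based estimator:

    #include <stdio.h>

    #define MIN_SEQS 4   /* stand-in for MIN_SEQUENCES_BLOCK_SPLITTING */

    static size_t estimate(const size_t* costs, size_t lo, size_t hi)
    {
        size_t maxc = 0, i;
        for (i = lo; i < hi; i++) if (costs[i] > maxc) maxc = costs[i];
        return (hi - lo) * maxc + 3;   /* toy model + 3-byte block header */
    }

    /* Same divide-and-conquer shape as the helper above:
       recurse only when the two halves estimate strictly smaller than the whole. */
    static void deriveSplits(const size_t* costs, size_t lo, size_t hi,
                             size_t* splits, size_t* nbSplits)
    {
        size_t const mid = (lo + hi) / 2;
        if (hi - lo < MIN_SEQS) return;
        if (estimate(costs, lo, mid) + estimate(costs, mid, hi) < estimate(costs, lo, hi)) {
            deriveSplits(costs, lo, mid, splits, nbSplits);
            splits[(*nbSplits)++] = mid;
            deriveSplits(costs, mid, hi, splits, nbSplits);
        }
    }

    int main(void)
    {
        size_t const costs[8] = { 1, 1, 1, 1, 4, 4, 4, 4 };
        size_t splits[8]; size_t nbSplits = 0; size_t i;
        deriveSplits(costs, 0, 8, splits, &nbSplits);
        for (i = 0; i < nbSplits; i++) printf("split before sequence %zu\n", splits[i]);
        return 0;   /* prints a single split at the cost-regime boundary, index 4 */
    }
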
*/ -    repcodes_t dRep; -    repcodes_t cRep; -    ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); -    ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); -    ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); +    Repcodes_t dRep; +    Repcodes_t cRep; +    ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t)); +    ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t)); +    ZSTD_memset(nextSeqStore, 0, sizeof(SeqStore_t)); -    DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", +    DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",                  (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,                  (unsigned)zc->blockState.matchState.nextToUpdate);      if (numSplits == 0) { -        size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, -                                                                   &dRep, &cRep, -                                                                    op, dstCapacity, -                                                                    ip, blockSize, -                                                                    lastBlock, 0 /* isPartition */); +        size_t cSizeSingleBlock = +            ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, +                                            &dRep, &cRep, +                                            op, dstCapacity, +                                            ip, blockSize, +                                            lastBlock, 0 /* isPartition */);          FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");          DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits"); -        assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); +        assert(zc->blockSizeMax <= ZSTD_BLOCKSIZE_MAX); +        assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize);          return cSizeSingleBlock;      }      ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);      for (i = 0; i <= numSplits; ++i) { -        size_t srcBytes;          size_t cSizeChunk;          U32 const lastPartition = (i == numSplits);          U32 lastBlockEntireSrc = 0; -        srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); +        size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);          srcBytesTotal += srcBytes;          if (lastPartition) {              /* This is the final partition, need to account for possible last literals */ @@ -3621,7 +4246,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac                                                         op, dstCapacity,                                                         ip, srcBytes,                                                         lastBlockEntireSrc, 1 /* isPartition */); -        DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); +        DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size", +                    ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);          FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");          ip += 
srcBytes; @@ -3629,12 +4255,12 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac          dstCapacity -= cSizeChunk;          cSize += cSizeChunk;          *currSeqStore = *nextSeqStore; -        assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); +        assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize);      } -    /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes -     * for the next block. +    /* cRep and dRep may have diverged during the compression. +     * If so, we use the dRep repcodes for the next block.       */ -    ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t)); +    ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t));      return cSize;  } @@ -3643,21 +4269,20 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,                                void* dst, size_t dstCapacity,                                const void* src, size_t srcSize, U32 lastBlock)  { -    const BYTE* ip = (const BYTE*)src; -    BYTE* op = (BYTE*)dst;      U32 nbSeq;      size_t cSize; -    DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); -    assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable); +    DEBUGLOG(5, "ZSTD_compressBlock_splitBlock"); +    assert(zc->appliedParams.postBlockSplitter == ZSTD_ps_enable);      {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);          FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");          if (bss == ZSTDbss_noCompress) {              if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)                  zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; -            cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); +            RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); +            cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);              FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); -            DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block"); +            DEBUGLOG(5, "ZSTD_compressBlock_splitBlock: Nocompress block");              return cSize;          }          nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart); @@ -3673,9 +4298,9 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,                              void* dst, size_t dstCapacity,                              const void* src, size_t srcSize, U32 frame)  { -    /* This the upper bound for the length of an rle block. -     * This isn't the actual upper bound. Finding the real threshold -     * needs further investigation. +    /* This is an estimated upper bound for the length of an rle block. +     * This isn't the actual upper bound. +     * Finding the real threshold needs further investigation.       
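
The RLE fast path depends on detecting single-byte runs, which ZSTD_isRLE() earlier in this diff does with an unrolled word-at-a-time loop. A byte-wise sketch of the same predicate:

    #include <stdio.h>
    #include <string.h>

    /* Equivalent check, one byte at a time: "every byte equals the first one". */
    static int isRLE(const unsigned char* src, size_t length)
    {
        size_t i;
        for (i = 1; i < length; i++)
            if (src[i] != src[0]) return 0;
        return 1;
    }

    int main(void)
    {
        unsigned char buf[100];
        memset(buf, 'a', sizeof(buf));
        printf("%d\n", isRLE(buf, sizeof(buf)));  /* 1: emit a 1-byte RLE block */
        buf[42] = 'b';
        printf("%d\n", isRLE(buf, sizeof(buf)));  /* 0: take the normal path */
        return 0;
    }
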
*/      const U32 rleMaxLength = 25;      size_t cSize; @@ -3687,11 +4312,15 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,      {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);          FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); -        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; } +        if (bss == ZSTDbss_noCompress) { +            RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); +            cSize = 0; +            goto out; +        }      }      if (zc->seqCollector.collectSequences) { -        ZSTD_copyBlockSequences(zc); +        FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed");          ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);          return 0;      } @@ -3702,7 +4331,7 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,              &zc->appliedParams,              dst, dstCapacity,              srcSize, -            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, +            zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,              zc->bmi2);      if (frame && @@ -3767,10 +4396,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,           *   * cSize >= blockBound(srcSize): We have expanded the block too much so           *     emit an uncompressed block.           */ -        { -            size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); +        {   size_t const cSize = +                ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);              if (cSize != ERROR(dstSize_tooSmall)) { -                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); +                size_t const maxCSize = +                    srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);                  FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");                  if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {                      ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); @@ -3778,7 +4408,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,                  }              }          } -    } +    } /* if (bss == ZSTDbss_compress)*/      DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");      /* Superblock compression failed, attempt to emit a single no compress block. @@ -3807,7 +4437,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,      return cSize;  } -static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, +static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms,                                           ZSTD_cwksp* ws,                                           ZSTD_CCtx_params const* params,                                           void const* ip, @@ -3831,39 +4461,82 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,      }  } +#include "zstd_preSplit.h" + +static size_t ZSTD_optimalBlockSize(ZSTD_CCtx* cctx, const void* src, size_t srcSize, size_t blockSizeMax, int splitLevel, ZSTD_strategy strat, S64 savings) +{ +    /* split level based on compression strategy, from `fast` to `btultra2` */ +    static const int splitLevels[] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }; +    /* note: conservatively only split full blocks (128 KB) currently. 
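
Before the function body below picks a split level, two gates must pass. A distilled sketch of that gating, where savings means "bytes saved so far" (consumed minus produced); the real code then calls ZSTD_splitBlock() to choose the actual boundary:

    #include <stdio.h>

    #define KB * (1 << 10)   /* same trick as the 128 KB spelling in the diff */

    static int maySplitNextBlock(size_t remaining, size_t blockSizeMax, long long savings)
    {
        if (remaining < (size_t)(128 KB) || blockSizeMax < (size_t)(128 KB))
            return 0;   /* only full 128 KB blocks are considered for splitting */
        if (savings < 3)
            return 0;   /* no verified savings yet: data may be incompressible */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", maySplitNextBlock(512 KB, 128 KB, 0));    /* 0: first block */
        printf("%d\n", maySplitNextBlock(384 KB, 128 KB, 4096)); /* 1: proven compressible */
        return 0;
    }
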
+     * While it's possible to go lower, let's keep it simple for a first implementation. +     * Besides, benefits of splitting are reduced when blocks are already small. +     */ +    if (srcSize < 128 KB || blockSizeMax < 128 KB) +        return MIN(srcSize, blockSizeMax); +    /* do not split incompressible data though: +     * require verified savings to allow pre-splitting. +     * Note: as a consequence, the first full block is not split. +     */ +    if (savings < 3) { +        DEBUGLOG(6, "don't attempt splitting: savings (%i) too low", (int)savings); +        return 128 KB; +    } +    /* apply @splitLevel, or use default value (which depends on @strat). +     * note that splitting heuristic is still conditioned by @savings >= 3, +     * so the first block will not reach this code path */ +    if (splitLevel == 1) return 128 KB; +    if (splitLevel == 0) { +        assert(ZSTD_fast <= strat && strat <= ZSTD_btultra2); +        splitLevel = splitLevels[strat]; +    } else { +        assert(2 <= splitLevel && splitLevel <= 6); +        splitLevel -= 2; +    } +    return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize); +} +  /*! ZSTD_compress_frameChunk() :  *   Compress a chunk of data into one or multiple blocks.  *   All blocks will be terminated, all input will be consumed.  *   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.  *   Frame is supposed already started (header already produced) -*   @return : compressed size, or an error code +*  @return : compressed size, or an error code  */  static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,                                       void* dst, size_t dstCapacity,                                 const void* src, size_t srcSize,                                       U32 lastFrameChunk)  { -    size_t blockSize = cctx->blockSize; +    size_t blockSizeMax = cctx->blockSizeMax;      size_t remaining = srcSize;      const BYTE* ip = (const BYTE*)src;      BYTE* const ostart = (BYTE*)dst;      BYTE* op = ostart;      U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; +    S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize;      assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); -    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize); +    DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax);      if (cctx->appliedParams.fParams.checksumFlag && srcSize)          xxh64_update(&cctx->xxhState, src, srcSize);      while (remaining) { -        ZSTD_matchState_t* const ms = &cctx->blockState.matchState; -        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); - -        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE, +        ZSTD_MatchState_t* const ms = &cctx->blockState.matchState; +        size_t const blockSize = ZSTD_optimalBlockSize(cctx, +                                ip, remaining, +                                blockSizeMax, +                                cctx->appliedParams.preBlockSplitter_level, +                                cctx->appliedParams.cParams.strategy, +                                savings); +        U32 const lastBlock = lastFrameChunk & (blockSize == remaining); +        assert(blockSize <= remaining); + +        /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding +         * additional 1. 
 /*! ZSTD_compress_frameChunk() :
 *   Compress a chunk of data into one or multiple blocks.
 *   All blocks will be terminated, all input will be consumed.
 *   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
 *   Frame is supposed to be already started (header already produced)
-*   @return : compressed size, or an error code
+*  @return : compressed size, or an error code
 */
 static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                      U32 lastFrameChunk)
 {
-    size_t blockSize = cctx->blockSize;
+    size_t blockSizeMax = cctx->blockSizeMax;
     size_t remaining = srcSize;
     const BYTE* ip = (const BYTE*)src;
     BYTE* const ostart = (BYTE*)dst;
     BYTE* op = ostart;
     U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+    S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize;
 
     assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
 
-    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+    DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax);
 
     if (cctx->appliedParams.fParams.checksumFlag && srcSize)
         xxh64_update(&cctx->xxhState, src, srcSize);
 
     while (remaining) {
-        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
-        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
-
-        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+        ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
+        size_t const blockSize = ZSTD_optimalBlockSize(cctx,
+                                ip, remaining,
+                                blockSizeMax,
+                                cctx->appliedParams.preBlockSplitter_level,
+                                cctx->appliedParams.cParams.strategy,
+                                savings);
+        U32 const lastBlock = lastFrameChunk & (blockSize == remaining);
+        assert(blockSize <= remaining);
+
+        /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we add an
+         * additional 1. We need to revisit this logic and make it more consistent. */
+        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1,
                         dstSize_tooSmall,
                         "not enough space to store compressed block");
-        if (remaining < blockSize) blockSize = remaining;
 
         ZSTD_overflowCorrectIfNeeded(
             ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
@@ -3899,8 +4572,23 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
                     MEM_writeLE24(op, cBlockHeader);
                     cSize += ZSTD_blockHeaderSize;
                 }
-            }
-
+            }  /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/
+
+            /* @savings is employed to ensure that splitting doesn't worsen expansion of incompressible data.
+             * Without splitting, the maximum expansion is 3 bytes per full block.
+             * An adversarial input could attempt to fudge the split detector
+             * and make it split incompressible data, resulting in more block headers.
+             * Note that, since ZSTD_COMPRESSBOUND() assumes a worst case scenario of 1KB per block,
+             * and the splitter never creates blocks that small (current lower limit is 8 KB),
+             * there is already no risk of expanding beyond the ZSTD_COMPRESSBOUND() limit.
+             * But if the goal is not to expand by more than 3 bytes per 128 KB full block,
+             * then yes, it becomes possible to make the block splitter oversplit incompressible data.
+             * Using @savings, we enforce an even more conservative condition,
+             * requiring the presence of enough savings (at least 3 bytes) to authorize splitting,
+             * otherwise only full blocks are used.
+             * But being conservative is fine,
+             * since splitting barely compressible blocks is not fruitful anyway */
+            savings += (S64)blockSize - (S64)cSize;
+
             ip += blockSize;
             assert(remaining >= blockSize);
@@ -3919,8 +4607,10 @@
 static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
-                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
-{   BYTE* const op = (BYTE*)dst;
+                                    const ZSTD_CCtx_params* params,
+                                    U64 pledgedSrcSize, U32 dictID)
+{
+    BYTE* const op = (BYTE*)dst;
     U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
     U32   const dictIDSizeCode = params->fParams.noDictIDFlag ?
0 : dictIDSizeCodeLength;   /* 0-3 */      U32   const checksumFlag = params->fParams.checksumFlag>0; @@ -4001,19 +4691,15 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)      }  } -size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq) +void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)  { -    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong, -                    "wrong cctx stage"); -    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable, -                    parameter_unsupported, -                    "incompatible with ldm"); +    assert(cctx->stage == ZSTDcs_init); +    assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable);      cctx->externSeqStore.seq = seq;      cctx->externSeqStore.size = nbSeq;      cctx->externSeqStore.capacity = nbSeq;      cctx->externSeqStore.pos = 0;      cctx->externSeqStore.posInSequence = 0; -    return 0;  } @@ -4022,7 +4708,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,                          const void* src, size_t srcSize,                                 U32 frame, U32 lastFrameChunk)  { -    ZSTD_matchState_t* const ms = &cctx->blockState.matchState; +    ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;      size_t fhSize = 0;      DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u", @@ -4057,7 +4743,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,              src, (BYTE const*)src + srcSize);      } -    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize); +    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSizeMax);      {   size_t const cSize = frame ?                               
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :                               ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */); @@ -4078,58 +4764,90 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,      }  } -size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, -                              void* dst, size_t dstCapacity, -                        const void* src, size_t srcSize) +size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, +                                        void* dst, size_t dstCapacity, +                                  const void* src, size_t srcSize)  {      DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);      return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);  } +/* NOTE: Must just wrap ZSTD_compressContinue_public() */ +size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, +                             void* dst, size_t dstCapacity, +                       const void* src, size_t srcSize) +{ +    return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); +} -size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx)  {      ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;      assert(!ZSTD_checkCParams(cParams)); -    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); +    return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog);  } -size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +{ +    return ZSTD_getBlockSize_deprecated(cctx); +} + +/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ +size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)  {      DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); -    { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); +    { size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx);        RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }      return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);  } +/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ +    return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); +} +  /*! 
ZSTD_loadDictionaryContent() :
 *  @return : 0, or an error code
 */
-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
-                                         ldmState_t* ls,
-                                         ZSTD_cwksp* ws,
-                                         ZSTD_CCtx_params const* params,
-                                         const void* src, size_t srcSize,
-                                         ZSTD_dictTableLoadMethod_e dtlm)
+static size_t
+ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms,
+                        ldmState_t* ls,
+                        ZSTD_cwksp* ws,
+                        ZSTD_CCtx_params const* params,
+                        const void* src, size_t srcSize,
+                        ZSTD_dictTableLoadMethod_e dtlm,
+                        ZSTD_tableFillPurpose_e tfp)
 {
     const BYTE* ip = (const BYTE*) src;
     const BYTE* const iend = ip + srcSize;
     int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;
 
-    /* Assert that we the ms params match the params we're being given */
+    /* Assert that the ms params match the params we're being given */
     ZSTD_assertEqualCParams(params->cParams, ms->cParams);
 
-    if (srcSize > ZSTD_CHUNKSIZE_MAX) {
+    {   /* Ensure large dictionaries can't cause index overflow */
+
         /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
          * Dictionaries right at the edge will immediately trigger overflow
          * correction, but I don't want to insert extra constraints here.
          */
-        U32 const maxDictSize = ZSTD_CURRENT_MAX - 1;
-        /* We must have cleared our windows when our source is this large. */
-        assert(ZSTD_window_isEmpty(ms->window));
-        if (loadLdmDict)
-            assert(ZSTD_window_isEmpty(ls->window));
+        U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX;
+
+        int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&params->cParams);
+        if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) {
+            /* Some dictionary matchfinders in zstd use "short cache",
+             * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each
+             * CDict hashtable entry as a tag rather than as part of an index.
+             * When short cache is used, we need to truncate the dictionary
+             * so that its indices don't overlap with the tag. */
+            U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX;
+            maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize);
+            assert(!loadLdmDict);
+        }
+
         /* If the dictionary is too large, only load the suffix of the dictionary. */
         if (srcSize > maxDictSize) {
             ip = iend - maxDictSize;
@@ -4138,35 +4856,59 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
         }
     }
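
To make the truncation above concrete: in current zstd sources ZSTD_SHORT_CACHE_TAG_BITS is 8 and ZSTD_WINDOW_START_INDEX is 2 (treat both values as assumptions of this aside), so a short-cache CDict can index at most (1u << 24) - 2 bytes of dictionary, just under 16 MiB. The self-contained check below reproduces the arithmetic; nothing in it is a zstd API.

    #include <stdio.h>

    int main(void)
    {
        unsigned const tagBits     = 8;   /* assumed ZSTD_SHORT_CACHE_TAG_BITS */
        unsigned const windowStart = 2;   /* assumed ZSTD_WINDOW_START_INDEX   */
        /* the low tagBits of each hashtable entry hold a match-candidate tag,
         * leaving only 32 - tagBits bits to address dictionary content */
        unsigned const maxDictSize = (1u << (32 - tagBits)) - windowStart;
        printf("short-cache max dict size: %u bytes\n", maxDictSize);  /* 16777214 */
        return 0;
    }
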
-    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
-    ms->forceNonContiguous = params->deterministicRefPrefix;
-
-    if (loadLdmDict) {
+    DEBUGLOG(4, "ZSTD_loadDictionaryContent: useRowMatchFinder=%d", (int)params->useRowMatchFinder);
+
+    if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */
+        DEBUGLOG(4, "ZSTD_loadDictionaryContent: Trigger loadLdmDict");
         ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
         ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
+        ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
+        DEBUGLOG(4, "ZSTD_loadDictionaryContent: ZSTD_ldm_fillHashTable completes");
+    }
+
+    /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */
+    {   U32 maxDictSize = 1U << MIN(MAX(params->cParams.hashLog + 3, params->cParams.chainLog + 1), 31);
+        if (srcSize > maxDictSize) {
+            ip = iend - maxDictSize;
+            src = ip;
+            srcSize = maxDictSize;
+        }
     }
 
+    ms->nextToUpdate = (U32)(ip - ms->window.base);
+    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+    ms->forceNonContiguous = params->deterministicRefPrefix;
+
     if (srcSize <= HASH_READ_SIZE) return 0;
 
     ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
 
-    if (loadLdmDict)
-        ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
-
     switch(params->cParams.strategy)
     {
     case ZSTD_fast:
-        ZSTD_fillHashTable(ms, iend, dtlm);
+        ZSTD_fillHashTable(ms, iend, dtlm, tfp);
         break;
     case ZSTD_dfast:
-        ZSTD_fillDoubleHashTable(ms, iend, dtlm);
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+        ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp);
+#else
+        assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
        break;
 
     case ZSTD_greedy:
     case ZSTD_lazy:
     case ZSTD_lazy2:
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR)
         assert(srcSize >= HASH_READ_SIZE);
         if (ms->dedicatedDictSearch) {
             assert(ms->chainTable != NULL);
@@ -4174,7 +4916,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
         } else {
             assert(params->useRowMatchFinder != ZSTD_ps_auto);
             if (params->useRowMatchFinder == ZSTD_ps_enable) {
-                size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
+                size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog);
                 ZSTD_memset(ms->tagTable, 0, tagTableSize);
                 ZSTD_row_update(ms, iend-HASH_READ_SIZE);
                 DEBUGLOG(4, "Using row-based hash table for lazy dict");
@@ -4183,14 +4925,24 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
                 ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
                 DEBUGLOG(4, "Using chain-based hash table for lazy dict");
             }
         }
+#else
+        assert(0); /* shouldn't be called: cparams should've been adjusted.
*/ +#endif          break;      case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */      case ZSTD_btopt:      case ZSTD_btultra:      case ZSTD_btultra2: +#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)          assert(srcSize >= HASH_READ_SIZE); +        DEBUGLOG(4, "Fill %u bytes into the Binary Tree", (unsigned)srcSize);          ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend); +#else +        assert(0); /* shouldn't be called: cparams should've been adjusted. */ +#endif          break;      default: @@ -4233,20 +4985,19 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,      {   unsigned maxSymbolValue = 255;          unsigned hasZeroWeights = 1;          size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, -            dictEnd-dictPtr, &hasZeroWeights); +            (size_t)(dictEnd-dictPtr), &hasZeroWeights);          /* We only set the loaded table as valid if it contains all non-zero           * weights. Otherwise, we set it to check */ -        if (!hasZeroWeights) +        if (!hasZeroWeights && maxSymbolValue == 255)              bs->entropy.huf.repeatMode = HUF_repeat_valid;          RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, ""); -        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");          dictPtr += hufHeaderSize;      }      {   unsigned offcodeLog; -        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));          RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");          RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");          /* fill all offset symbols to avoid garbage at end of table */ @@ -4261,7 +5012,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,      {   short matchlengthNCount[MaxML+1];          unsigned matchlengthMaxValue = MaxML, matchlengthLog; -        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));          RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");          RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");          RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( @@ -4275,7 +5026,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,      {   short litlengthNCount[MaxLL+1];          unsigned litlengthMaxValue = MaxLL, litlengthLog; -        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));          RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");          RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");          RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( @@ -4309,7 +5060,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* 
bs, void* workspace,                  RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");      }   }   } -    return dictPtr - (const BYTE*)dict; +    return (size_t)(dictPtr - (const BYTE*)dict);  }  /* Dictionary format : @@ -4322,11 +5073,12 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,   *                dictSize supposed >= 8   */  static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, -                                      ZSTD_matchState_t* ms, +                                      ZSTD_MatchState_t* ms,                                        ZSTD_cwksp* ws,                                        ZSTD_CCtx_params const* params,                                        const void* dict, size_t dictSize,                                        ZSTD_dictTableLoadMethod_e dtlm, +                                      ZSTD_tableFillPurpose_e tfp,                                        void* workspace)  {      const BYTE* dictPtr = (const BYTE*)dict; @@ -4345,7 +5097,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,      {          size_t const dictContentSize = (size_t)(dictEnd - dictPtr);          FORWARD_IF_ERROR(ZSTD_loadDictionaryContent( -            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), ""); +            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), "");      }      return dictID;  } @@ -4354,13 +5106,14 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,  *   @return : dictID, or an error code */  static size_t  ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, -                               ZSTD_matchState_t* ms, +                               ZSTD_MatchState_t* ms,                                 ldmState_t* ls,                                 ZSTD_cwksp* ws,                           const ZSTD_CCtx_params* params,                           const void* dict, size_t dictSize,                                 ZSTD_dictContentType_e dictContentType,                                 ZSTD_dictTableLoadMethod_e dtlm, +                               ZSTD_tableFillPurpose_e tfp,                                 void* workspace)  {      DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); @@ -4373,13 +5126,13 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,      /* dict restricted modes */      if (dictContentType == ZSTD_dct_rawContent) -        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm); +        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp);      if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {          if (dictContentType == ZSTD_dct_auto) {              DEBUGLOG(4, "raw content dictionary detected");              return ZSTD_loadDictionaryContent( -                ms, ls, ws, params, dict, dictSize, dtlm); +                ms, ls, ws, params, dict, dictSize, dtlm, tfp);          }          RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");          assert(0);   /* impossible */ @@ -4387,13 +5140,14 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,      /* dict as full zstd dictionary */      return ZSTD_loadZstdDictionary( -        bs, ms, ws, params, dict, dictSize, dtlm, workspace); +        bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace);  }  #define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)  #define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)  /*! 
ZSTD_compressBegin_internal() :
+ * Assumption : either @dict OR @cdict (or none) is non-NULL, never both
 * @return : 0, or an error code */
 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                                     const void* dict, size_t dictSize,
@@ -4426,11 +5180,11 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                         cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                         &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
                         cdict->dictContentSize, cdict->dictContentType, dtlm,
-                        cctx->entropyWorkspace)
+                        ZSTD_tfp_forCCtx, cctx->tmpWorkspace)
               : ZSTD_compress_insertDictionary(
                         cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                         &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
-                        dictContentType, dtlm, cctx->entropyWorkspace);
+                        dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->tmpWorkspace);
         FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
         assert(dictID <= UINT_MAX);
         cctx->dictID = (U32)dictID;
@@ -4471,11 +5225,11 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
                                             &cctxParams, pledgedSrcSize);
 }
 
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+static size_t
+ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
 {
     ZSTD_CCtx_params cctxParams;
-    {
-        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
+    {   ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
         ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ?
ZSTD_CLEVEL_DEFAULT : compressionLevel);      }      DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); @@ -4483,9 +5237,15 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di                                         &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);  } +size_t +ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +{ +    return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); +} +  size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)  { -    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); +    return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel);  } @@ -4496,14 +5256,13 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)  {      BYTE* const ostart = (BYTE*)dst;      BYTE* op = ostart; -    size_t fhSize = 0;      DEBUGLOG(4, "ZSTD_writeEpilogue");      RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");      /* special case : empty frame */      if (cctx->stage == ZSTDcs_init) { -        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); +        size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);          FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");          dstCapacity -= fhSize;          op += fhSize; @@ -4513,8 +5272,9 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)      if (cctx->stage != ZSTDcs_ending) {          /* write one last empty block, make it the "last" block */          U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; -        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue"); -        MEM_writeLE32(op, cBlockHeader24); +        ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3); +        RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue"); +        MEM_writeLE24(op, cBlockHeader24);          op += ZSTD_blockHeaderSize;          dstCapacity -= ZSTD_blockHeaderSize;      } @@ -4528,7 +5288,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)      }      cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */ -    return op-ostart; +    return (size_t)(op-ostart);  }  void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) @@ -4537,9 +5297,9 @@ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)      (void)extraCSize;  } -size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, -                         void* dst, size_t dstCapacity, -                   const void* src, size_t srcSize) +size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, +                               void* dst, size_t dstCapacity, +                         const void* src, size_t srcSize)  {      size_t endResult;      size_t const cSize = ZSTD_compressContinue_internal(cctx, @@ -4563,6 +5323,14 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,      return cSize + endResult;  } +/* NOTE: Must just wrap ZSTD_compressEnd_public() */ +size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, +                        void* dst, size_t dstCapacity, +                  const void* src, size_t srcSize) +{ +    return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); +} +  size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,                                 void* dst, size_t dstCapacity,                           const void* src, size_t 
srcSize, @@ -4591,7 +5359,7 @@ size_t ZSTD_compress_advanced_internal(      FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,                           dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,                           params, srcSize, ZSTDb_not_buffered) , ""); -    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +    return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);  }  size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, @@ -4709,7 +5477,7 @@ static size_t ZSTD_initCDict_internal(          {   size_t const dictID = ZSTD_compress_insertDictionary(                      &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,                      ¶ms, cdict->dictContent, cdict->dictContentSize, -                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace); +                    dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace);              FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");              assert(dictID <= (size_t)(U32)-1);              cdict->dictID = (U32)dictID; @@ -4719,14 +5487,16 @@ static size_t ZSTD_initCDict_internal(      return 0;  } -static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize, -                                      ZSTD_dictLoadMethod_e dictLoadMethod, -                                      ZSTD_compressionParameters cParams, -                                      ZSTD_paramSwitch_e useRowMatchFinder, -                                      U32 enableDedicatedDictSearch, -                                      ZSTD_customMem customMem) +static ZSTD_CDict* +ZSTD_createCDict_advanced_internal(size_t dictSize, +                                ZSTD_dictLoadMethod_e dictLoadMethod, +                                ZSTD_compressionParameters cParams, +                                ZSTD_ParamSwitch_e useRowMatchFinder, +                                int enableDedicatedDictSearch, +                                ZSTD_customMem customMem)  {      if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; +    DEBUGLOG(3, "ZSTD_createCDict_advanced_internal (dictSize=%u)", (unsigned)dictSize);      {   size_t const workspaceSize =              ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + @@ -4763,6 +5533,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,  {      ZSTD_CCtx_params cctxParams;      ZSTD_memset(&cctxParams, 0, sizeof(cctxParams)); +    DEBUGLOG(3, "ZSTD_createCDict_advanced, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);      ZSTD_CCtxParams_init(&cctxParams, 0);      cctxParams.cParams = cParams;      cctxParams.customMem = customMem; @@ -4783,7 +5554,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(      ZSTD_compressionParameters cParams;      ZSTD_CDict* cdict; -    DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType); +    DEBUGLOG(3, "ZSTD_createCDict_advanced2, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);      if (!customMem.customAlloc ^ !customMem.customFree) return NULL;      if (cctxParams.enableDedicatedDictSearch) { @@ -4802,7 +5573,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(              &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);      } -    DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch); +    DEBUGLOG(3, "ZSTD_createCDict_advanced2: DedicatedDictSearch=%u", cctxParams.enableDedicatedDictSearch);      cctxParams.cParams = 
cParams;      cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); @@ -4810,10 +5581,8 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(                          dictLoadMethod, cctxParams.cParams,                          cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,                          customMem); -    if (!cdict) -        return NULL; -    if (ZSTD_isError( ZSTD_initCDict_internal(cdict, +    if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict,                                      dict, dictSize,                                      dictLoadMethod, dictContentType,                                      cctxParams) )) { @@ -4867,7 +5636,7 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict)   *  workspaceSize: Use ZSTD_estimateCDictSize()   *                 to determine how large workspace must be.   *  cParams : use ZSTD_getCParams() to transform a compression level - *            into its relevants cParams. + *            into its relevant cParams.   * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)   *  Note : there is no corresponding "free" function.   *         Since workspace was allocated externally, it must be freed externally. @@ -4879,7 +5648,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(                                   ZSTD_dictContentType_e dictContentType,                                   ZSTD_compressionParameters cParams)  { -    ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); +    ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);      /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */      size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);      size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) @@ -4890,6 +5659,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(      ZSTD_CDict* cdict;      ZSTD_CCtx_params params; +    DEBUGLOG(4, "ZSTD_initStaticCDict (dictSize==%u)", (unsigned)dictSize);      if ((size_t)workspace & 7) return NULL;  /* 8-aligned */      { @@ -4900,14 +5670,13 @@ const ZSTD_CDict* ZSTD_initStaticCDict(          ZSTD_cwksp_move(&cdict->workspace, &ws);      } -    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u", -        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));      if (workspaceSize < neededSize) return NULL;      ZSTD_CCtxParams_init(¶ms, 0);      params.cParams = cParams;      params.useRowMatchFinder = useRowMatchFinder;      cdict->useRowMatchFinder = useRowMatchFinder; +    cdict->compressionLevel = ZSTD_NO_CLEVEL;      if (ZSTD_isError( ZSTD_initCDict_internal(cdict,                                                dict, dictSize, @@ -4987,12 +5756,17 @@ size_t ZSTD_compressBegin_usingCDict_advanced(  /* ZSTD_compressBegin_usingCDict() :   * cdict must be != NULL */ -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)  {      ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };      return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  } +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ +    return 
ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict); +} +  /*! ZSTD_compress_usingCDict_internal():   * Implementation of various ZSTD_compress_usingCDict* functions.   */ @@ -5002,7 +5776,7 @@ static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,                                  const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)  {      FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ -    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +    return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);  }  /*! ZSTD_compress_usingCDict_advanced(): @@ -5068,7 +5842,7 @@ size_t ZSTD_CStreamOutSize(void)      return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;  } -static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize) +static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)  {      if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))          return ZSTD_cpm_attachDict; @@ -5199,30 +5973,41 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)  static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)  { -    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; -    if (hintInSize==0) hintInSize = cctx->blockSize; -    return hintInSize; +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { +        return cctx->blockSizeMax - cctx->stableIn_notConsumed; +    } +    assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered); +    {   size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; +        if (hintInSize==0) hintInSize = cctx->blockSizeMax; +        return hintInSize; +    }  }  /* ZSTD_compressStream_generic():   *  internal function for all *compressStream*() variants - *  non-static, because can be called from zstdmt_compress.c - * @return : hint size for next input */ + * @return : hint size for next input to complete ongoing block */  static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,                                            ZSTD_outBuffer* output,                                            ZSTD_inBuffer* input,                                            ZSTD_EndDirective const flushMode)  { -    const char* const istart = (const char*)input->src; -    const char* const iend = input->size != 0 ? istart + input->size : istart; -    const char* ip = input->pos != 0 ? istart + input->pos : istart; -    char* const ostart = (char*)output->dst; -    char* const oend = output->size != 0 ? ostart + output->size : ostart; -    char* op = output->pos != 0 ? ostart + output->pos : ostart; +    const char* const istart = (assert(input != NULL), (const char*)input->src); +    const char* const iend = (istart != NULL) ? istart + input->size : istart; +    const char* ip = (istart != NULL) ? istart + input->pos : istart; +    char* const ostart = (assert(output != NULL), (char*)output->dst); +    char* const oend = (ostart != NULL) ? ostart + output->size : ostart; +    char* op = (ostart != NULL) ? 
ostart + output->pos : ostart;      U32 someMoreWork = 1;      /* check expectations */ -    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode); +    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos); +    assert(zcs != NULL); +    if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) { +        assert(input->pos >= zcs->stableIn_notConsumed); +        input->pos -= zcs->stableIn_notConsumed; +        if (ip) ip -= zcs->stableIn_notConsumed; +        zcs->stableIn_notConsumed = 0; +    }      if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {          assert(zcs->inBuff != NULL);          assert(zcs->inBuffSize > 0); @@ -5231,8 +6016,10 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,          assert(zcs->outBuff !=  NULL);          assert(zcs->outBuffSize > 0);      } -    assert(output->pos <= output->size); +    if (input->src == NULL) assert(input->size == 0);      assert(input->pos <= input->size); +    if (output->dst == NULL) assert(output->size == 0); +    assert(output->pos <= output->size);      assert((U32)flushMode <= (U32)ZSTD_e_end);      while (someMoreWork) { @@ -5243,12 +6030,13 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,          case zcss_load:              if ( (flushMode == ZSTD_e_end) -              && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip)     /* Enough output space */ +              && ( (size_t)(oend-op) >= ZSTD_compressBound((size_t)(iend-ip))     /* Enough output space */                  || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)  /* OR we are allowed to return dstSizeTooSmall */                && (zcs->inBuffPos == 0) ) {                  /* shortcut to compression pass directly into output buffer */ -                size_t const cSize = ZSTD_compressEnd(zcs, -                                                op, oend-op, ip, iend-ip); +                size_t const cSize = ZSTD_compressEnd_public(zcs, +                                                op, (size_t)(oend-op), +                                                ip, (size_t)(iend-ip));                  DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);                  FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");                  ip = iend; @@ -5262,10 +6050,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,                  size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;                  size_t const loaded = ZSTD_limitCopy(                                          zcs->inBuff + zcs->inBuffPos, toLoad, -                                        ip, iend-ip); +                                        ip, (size_t)(iend-ip));                  zcs->inBuffPos += loaded; -                if (loaded != 0) -                    ip += loaded; +                if (ip) ip += loaded;                  if ( (flushMode == ZSTD_e_continue)                    && (zcs->inBuffPos < zcs->inBuffTarget) ) {                      /* not enough input to fill full block : stop here */ @@ -5276,16 +6063,29 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,                      /* empty */                      someMoreWork = 0; break;                  } +            } else { +                assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable); +                if ( (flushMode == ZSTD_e_continue) +                  && ( (size_t)(iend - ip) < zcs->blockSizeMax) ) { +                    /* can't compress a full block : stop here 
*/ +                    zcs->stableIn_notConsumed = (size_t)(iend - ip); +                    ip = iend;  /* pretend to have consumed input */ +                    someMoreWork = 0; break; +                } +                if ( (flushMode == ZSTD_e_flush) +                  && (ip == iend) ) { +                    /* empty */ +                    someMoreWork = 0; break; +                }              }              /* compress current block (note : this stage cannot be stopped in the middle) */              DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);              {   int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);                  void* cDst;                  size_t cSize; -                size_t oSize = oend-op; -                size_t const iSize = inputBuffered -                    ? zcs->inBuffPos - zcs->inToCompress -                    : MIN((size_t)(iend - ip), zcs->blockSize); +                size_t oSize = (size_t)(oend-op); +                size_t const iSize = inputBuffered ? zcs->inBuffPos - zcs->inToCompress +                                                   : MIN((size_t)(iend - ip), zcs->blockSizeMax);                  if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)                      cDst = op;   /* compress into output buffer, to skip flush stage */                  else @@ -5293,34 +6093,31 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,                  if (inputBuffered) {                      unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);                      cSize = lastBlock ? -                            ZSTD_compressEnd(zcs, cDst, oSize, +                            ZSTD_compressEnd_public(zcs, cDst, oSize,                                          zcs->inBuff + zcs->inToCompress, iSize) : -                            ZSTD_compressContinue(zcs, cDst, oSize, +                            ZSTD_compressContinue_public(zcs, cDst, oSize,                                          zcs->inBuff + zcs->inToCompress, iSize);                      FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");                      zcs->frameEnded = lastBlock;                      /* prepare next block */ -                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; +                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax;                      if (zcs->inBuffTarget > zcs->inBuffSize) -                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; +                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSizeMax;                      DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",                              (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);                      if (!lastBlock)                          assert(zcs->inBuffTarget <= zcs->inBuffSize);                      zcs->inToCompress = zcs->inBuffPos; -                } else { -                    unsigned const lastBlock = (ip + iSize == iend); -                    assert(flushMode == ZSTD_e_end /* Already validated */); +                } else { /* !inputBuffered, hence ZSTD_bm_stable */ +                    unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend);                      cSize = lastBlock ? 
-                            ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) : -                            ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize); +                            ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) : +                            ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize);                      /* Consume the input prior to error checking to mirror buffered mode. */ -                    if (iSize > 0) -                        ip += iSize; +                    if (ip) ip += iSize;                      FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");                      zcs->frameEnded = lastBlock; -                    if (lastBlock) -                        assert(ip == iend); +                    if (lastBlock) assert(ip == iend);                  }                  if (cDst == op) {  /* no need to flush */                      op += cSize; @@ -5369,8 +6166,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,          }      } -    input->pos = ip - istart; -    output->pos = op - ostart; +    input->pos = (size_t)(ip - istart); +    output->pos = (size_t)(op - ostart);      if (zcs->frameEnded) return 0;      return ZSTD_nextInputSizeHint(zcs);  } @@ -5390,8 +6187,10 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf  /* After a compression call set the expected input/output buffer.   * This is validated at the start of the next compression call.   */ -static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input) +static void +ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input)  { +    DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)");      if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {          cctx->expectedInBuffer = *input;      } @@ -5410,22 +6209,27 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,  {      if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {          ZSTD_inBuffer const expect = cctx->expectedInBuffer; -        if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size) -            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!"); -        if (endOp != ZSTD_e_end) -            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!"); +        if (expect.src != input->src || expect.pos != input->pos) +            RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!");      } +    (void)endOp;      if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {          size_t const outBufferSize = output->size - output->pos;          if (cctx->expectedOutBufferSize != outBufferSize) -            RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!"); +            RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!");      }      return 0;  } +/* + * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize. + * Otherwise, it's ignored. + * @return: 0 on success, or a ZSTD_error code otherwise. 
+ */
 static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
                                              ZSTD_EndDirective endOp,
-                                             size_t inSize) {
+                                             size_t inSize)
+{
     ZSTD_CCtx_params params = cctx->requestedParams;
     ZSTD_prefixDict const prefixDict = cctx->prefixDict;
     FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
@@ -5438,21 +6242,24 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
          */
         params.compressionLevel = cctx->cdict->compressionLevel;
     }
-    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
-    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */
-    {
-        size_t const dictSize = prefixDict.dict
+    DEBUGLOG(4, "ZSTD_CCtx_init_compressStream2 : transparent init stage");
+    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-determine pledgedSrcSize */
+
+    {   size_t const dictSize = prefixDict.dict
                 ? prefixDict.dictSize
                 : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
-        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
+        ZSTD_CParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
         params.cParams = ZSTD_getCParamsFromCCtxParams(
                 &params, cctx->pledgedSrcSizePlusOne-1,
                 dictSize, mode);
     }
 
-    params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
+    params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(params.postBlockSplitter, &params.cParams);
     params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
     params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
+    params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences);
+    params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize);
+    params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel);
 
     {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
         assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
@@ -5468,7 +6275,7 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
             /* for small input: avoid automatic flush on reaching end of block, since
             * it would require adding a 3-byte null block to end the frame
             */
-            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
+            cctx->inBuffTarget = cctx->blockSizeMax + (cctx->blockSizeMax == pledgedSrcSize);
         } else {
             cctx->inBuffTarget = 0;
         }
@@ -5479,6 +6286,8 @@
     return 0;
 }
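
One externally visible consequence of the initialization above: when the very first streaming call uses ZSTD_e_end, inSize becomes the pledged source size, so a one-shot call produces a frame whose header records the content size without any prior ZSTD_CCtx_setPledgedSrcSize(). A minimal userspace sketch, linking against regular libzstd rather than this kernel build, with error handling abbreviated:

    #include <stdio.h>
    #include <string.h>
    #include <zstd.h>

    int main(void)
    {
        char const src[] = "one-shot streaming, size pledged implicitly";
        char dst[128];
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_inBuffer  in  = { src, strlen(src), 0 };
        ZSTD_outBuffer out = { dst, sizeof dst, 0 };
        /* first call is ZSTD_e_end: the input size becomes the pledged srcSize */
        size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (!ZSTD_isError(remaining) && remaining == 0)
            printf("frame: %zu bytes, header content size: %llu\n", out.pos,
                   (unsigned long long)ZSTD_getFrameContentSize(dst, out.pos));
        ZSTD_freeCCtx(cctx);
        return 0;
    }
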
initialization failed"); -        ZSTD_setBufferExpectations(cctx, output, input);    /* Set initial buffer expectations now that we've initialized */ +        size_t const inputSize = input->size - input->pos;  /* no obligation to start from pos==0 */ +        size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed; +        if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */ +          && (endOp == ZSTD_e_continue)                             /* no flush requested, more input to come */ +          && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) {              /* not even reached one block yet */ +            if (cctx->stableIn_notConsumed) {  /* not the first time */ +                /* check stable source guarantees */ +                RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer"); +                RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos"); +            } +            /* pretend input was consumed, to give a sense forward progress */ +            input->pos = input->size; +            /* save stable inBuffer, for later control, and flush/end */ +            cctx->expectedInBuffer = *input; +            /* but actually input wasn't consumed, so keep track of position from where compression shall resume */ +            cctx->stableIn_notConsumed += inputSize; +            /* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */ +            return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format);  /* at least some header to produce */ +        } +        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed"); +        ZSTD_setBufferExpectations(cctx, output, input);   /* Set initial buffer expectations now that we've initialized */      }      /* end of transparent initialization stage */ @@ -5512,13 +6340,20 @@ size_t ZSTD_compressStream2_simpleArgs (                        const void* src, size_t srcSize, size_t* srcPos,                              ZSTD_EndDirective endOp)  { -    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; -    ZSTD_inBuffer  input  = { src, srcSize, *srcPos }; +    ZSTD_outBuffer output; +    ZSTD_inBuffer  input; +    output.dst = dst; +    output.size = dstCapacity; +    output.pos = *dstPos; +    input.src = src; +    input.size = srcSize; +    input.pos = *srcPos;      /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ -    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); -    *dstPos = output.pos; -    *srcPos = input.pos; -    return cErr; +    {   size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); +        *dstPos = output.pos; +        *srcPos = input.pos; +        return cErr; +    }  }  size_t ZSTD_compress2(ZSTD_CCtx* cctx, @@ -5541,6 +6376,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,          /* Reset to the original values. 
 
 size_t ZSTD_compress2(ZSTD_CCtx* cctx,
@@ -5541,6 +6376,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
         /* Reset to the original values. */
         cctx->requestedParams.inBufferMode = originalInBufferMode;
         cctx->requestedParams.outBufferMode = originalOutBufferMode;
+
         FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
         if (result != 0) {  /* compression not completed, due to lack of output space */
             assert(oPos == dstCapacity);
@@ -5551,64 +6387,67 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
     }
 }
 
-typedef struct {
-    U32 idx;             /* Index in array of ZSTD_Sequence */
-    U32 posInSequence;   /* Position within sequence at idx */
-    size_t posInSrc;        /* Number of bytes given by sequences provided so far */
-} ZSTD_sequencePosition;
-
 /* ZSTD_validateSequence() :
- * @offCode : is presumed to follow format required by ZSTD_storeSeq()
+ * @offBase : must use the format required by ZSTD_storeSeq()
 * @returns a ZSTD error code if sequence is not valid
 */
 static size_t
-ZSTD_validateSequence(U32 offCode, U32 matchLength,
-                      size_t posInSrc, U32 windowLog, size_t dictSize)
+ZSTD_validateSequence(U32 offBase, U32 matchLength, U32 minMatch,
+                      size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer)
 {
-    U32 const windowSize = 1 << windowLog;
+    U32 const windowSize = 1u << windowLog;
     /* posInSrc represents the amount of data the decoder would decode up to this point.
      * As long as the amount of data decoded is less than or equal to window size, offsets may be
      * larger than the total length of output decoded in order to reference the dict, even larger than
     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
      */
    size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
-    RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
-    RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
+    size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4;
+    RETURN_ERROR_IF(offBase > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!");
+    /* Validate that matchLength is large enough for the given minMatch */
+    RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch");
     return 0;
 }
 
 /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
-static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
+static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
 {
-    U32 offCode = STORE_OFFSET(rawOffset);
+    U32 offBase = OFFSET_TO_OFFBASE(rawOffset);
 
     if (!ll0 && rawOffset == rep[0]) {
-        offCode = STORE_REPCODE_1;
+        offBase = REPCODE1_TO_OFFBASE;
     } else if (rawOffset == rep[1]) {
-        offCode = STORE_REPCODE(2 - ll0);
+        offBase = REPCODE_TO_OFFBASE(2 - ll0);
    } else if (rawOffset == rep[2]) {
-        offCode = STORE_REPCODE(3 - ll0);
+        offBase = REPCODE_TO_OFFBASE(3 - ll0);
     } else if (ll0 && rawOffset == rep[0] - 1) {
-        offCode = STORE_REPCODE_3;
+        offBase = REPCODE3_TO_OFFBASE;
     }
-    return offCode;
+    return offBase;
}
 
-/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
- * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
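
The OFFBASE convention handled by ZSTD_finalizeOffBase() above is compact: values 1..3 denote repcodes 1..3, and anything larger is the raw offset plus ZSTD_REP_NUM (3). The standalone model below re-derives the mapping; its macros mimic, but are not, the zstd-internal ones.

    #include <stdio.h>

    #define REP_NUM 3
    #define OFFSET_TO_OFFBASE(o)  ((o) + REP_NUM)   /* raw offsets start at OFFBASE 4 */
    #define REPCODE_TO_OFFBASE(r) (r)               /* repcodes occupy OFFBASE 1..3  */

    static unsigned finalize_offbase(unsigned rawOffset,
                                     const unsigned rep[REP_NUM], unsigned ll0)
    {
        unsigned offBase = OFFSET_TO_OFFBASE(rawOffset);
        if (!ll0 && rawOffset == rep[0])         offBase = REPCODE_TO_OFFBASE(1);
        else if (rawOffset == rep[1])            offBase = REPCODE_TO_OFFBASE(2 - ll0);
        else if (rawOffset == rep[2])            offBase = REPCODE_TO_OFFBASE(3 - ll0);
        else if (ll0 && rawOffset == rep[0] - 1) offBase = REPCODE_TO_OFFBASE(3);
        return offBase;
    }

    int main(void)
    {
        unsigned const rep[REP_NUM] = { 42, 100, 7 };
        printf("%u\n", finalize_offbase(42, rep, 0));   /* 1: matches rep[0]    */
        printf("%u\n", finalize_offbase(100, rep, 1));  /* 1: rep[1] with ll0   */
        printf("%u\n", finalize_offbase(55, rep, 0));   /* 58: plain offset + 3 */
        return 0;
    }
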
+/* This function scans through an array of ZSTD_Sequence, + * storing the sequences it reads, until it reaches a block delimiter. + * Note that the block delimiter includes the last literals of the block. + * @blockSize must be == sum(sequence_lengths). + * @returns @blockSize on success, and a ZSTD_error otherwise.   */  static size_t -ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, -                                              ZSTD_sequencePosition* seqPos, -                                        const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, -                                        const void* src, size_t blockSize) +ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, +                                   ZSTD_SequencePosition* seqPos, +                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, +                             const void* src, size_t blockSize, +                                   ZSTD_ParamSwitch_e externalRepSearch)  {      U32 idx = seqPos->idx; +    U32 const startIdx = idx;      BYTE const* ip = (BYTE const*)(src);      const BYTE* const iend = ip + blockSize; -    repcodes_t updatedRepcodes; +    Repcodes_t updatedRepcodes;      U32 dictSize; +    DEBUGLOG(5, "ZSTD_transferSequences_wBlockDelim (blockSize = %zu)", blockSize); +      if (cctx->cdict) {          dictSize = (U32)cctx->cdict->dictContentSize;      } else if (cctx->prefixDict.dict) { @@ -5616,27 +6455,60 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,      } else {          dictSize = 0;      } -    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); -    for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) { +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); +    for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {          U32 const litLength = inSeqs[idx].litLength; -        U32 const ll0 = (litLength == 0);          U32 const matchLength = inSeqs[idx].matchLength; -        U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0); -        ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); +        U32 offBase; -        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); +        if (externalRepSearch == ZSTD_ps_disable) { +            offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset); +        } else { +            U32 const ll0 = (litLength == 0); +            offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); +            ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); +        } + +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);          if (cctx->appliedParams.validateSequences) {              seqPos->posInSrc += litLength + matchLength; -            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, -                                                cctx->appliedParams.cParams.windowLog, dictSize), +            FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, +                                                seqPos->posInSrc, +                                                cctx->appliedParams.cParams.windowLog, dictSize, +                                                ZSTD_hasExtSeqProd(&cctx->appliedParams)),                                            
      "Sequence validation failed");          } -        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, +        RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,                          "Not enough memory allocated. Try adjusting ZSTD_c_minMatch."); -        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength); +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);          ip += matchLength + litLength;      } -    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); +    RETURN_ERROR_IF(idx == inSeqsSize, externalSequences_invalid, "Block delimiter not found."); + +    /* If we skipped repcode search while parsing, we need to update repcodes now */ +    assert(externalRepSearch != ZSTD_ps_auto); +    assert(idx >= startIdx); +    if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) { +        U32* const rep = updatedRepcodes.rep; +        U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */ + +        if (lastSeqIdx >= startIdx + 2) { +            rep[2] = inSeqs[lastSeqIdx - 2].offset; +            rep[1] = inSeqs[lastSeqIdx - 1].offset; +            rep[0] = inSeqs[lastSeqIdx].offset; +        } else if (lastSeqIdx == startIdx + 1) { +            rep[2] = rep[0]; +            rep[1] = inSeqs[lastSeqIdx - 1].offset; +            rep[0] = inSeqs[lastSeqIdx].offset; +        } else { +            assert(lastSeqIdx == startIdx); +            rep[2] = rep[1]; +            rep[1] = rep[0]; +            rep[0] = inSeqs[lastSeqIdx].offset; +        } +    } + +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));      if (inSeqs[idx].litLength) {          DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength); @@ -5644,37 +6516,43 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,          ip += inSeqs[idx].litLength;          seqPos->posInSrc += inSeqs[idx].litLength;      } -    RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!"); +    RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!");      seqPos->idx = idx+1; -    return 0; +    return blockSize;  } -/* Returns the number of bytes to move the current read position back by. Only non-zero - * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something - * went wrong. +/* + * This function attempts to scan through @blockSize bytes in @src + * represented by the sequences in @inSeqs, + * storing any (partial) sequences.   * - * This function will attempt to scan through blockSize bytes represented by the sequences - * in inSeqs, storing any (partial) sequences. + * Occasionally, we may want to reduce the actual number of bytes consumed from @src + * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH.   * - * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to - * avoid splitting a match, or to avoid splitting a match such that it would produce a match - * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. + * @returns the number of bytes consumed from @src, necessarily <= @blockSize. + * Otherwise, it may return a ZSTD error if something went wrong.   
*/  static size_t -ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, -                                   const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, -                                   const void* src, size_t blockSize) +ZSTD_transferSequences_noDelim(ZSTD_CCtx* cctx, +                               ZSTD_SequencePosition* seqPos, +                         const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, +                         const void* src, size_t blockSize, +                               ZSTD_ParamSwitch_e externalRepSearch)  {      U32 idx = seqPos->idx;      U32 startPosInSequence = seqPos->posInSequence;      U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;      size_t dictSize; -    BYTE const* ip = (BYTE const*)(src); -    BYTE const* iend = ip + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */ -    repcodes_t updatedRepcodes; +    const BYTE* const istart = (const BYTE*)(src); +    const BYTE* ip = istart; +    const BYTE* iend = istart + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */ +    Repcodes_t updatedRepcodes;      U32 bytesAdjustment = 0;      U32 finalMatchSplit = 0; +    /* TODO(embg) support fast parsing mode in noBlockDelim mode */ +    (void)externalRepSearch; +      if (cctx->cdict) {          dictSize = cctx->cdict->dictContentSize;      } else if (cctx->prefixDict.dict) { @@ -5682,15 +6560,15 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*      } else {          dictSize = 0;      } -    DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); +    DEBUGLOG(5, "ZSTD_transferSequences_noDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);      DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); -    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));      while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {          const ZSTD_Sequence currSeq = inSeqs[idx];          U32 litLength = currSeq.litLength;          U32 matchLength = currSeq.matchLength;          U32 const rawOffset = currSeq.offset; -        U32 offCode; +        U32 offBase;          /* Modify the sequence depending on where endPosInSequence lies */          if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { @@ -5704,7 +6582,6 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*              /* Move to the next sequence */              endPosInSequence -= currSeq.litLength + currSeq.matchLength;              startPosInSequence = 0; -            idx++;          } else {              /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence                 does not reach the end of the match. 
So, we have to split the sequence */ @@ -5744,58 +6621,113 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*          }          /* Check if this offset can be represented with a repcode */          {   U32 const ll0 = (litLength == 0); -            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0); -            ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); +            offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0); +            ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);          }          if (cctx->appliedParams.validateSequences) {              seqPos->posInSrc += litLength + matchLength; -            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, -                                                   cctx->appliedParams.cParams.windowLog, dictSize), +            FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, +                                                   cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)),                                                     "Sequence validation failed");          } -        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); -        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); +        RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,                          "Not enough memory allocated. Try adjusting ZSTD_c_minMatch."); -        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength); +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);          ip += matchLength + litLength; +        if (!finalMatchSplit) +            idx++; /* Next Sequence */      }      DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);      assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);      seqPos->idx = idx;      seqPos->posInSequence = endPosInSequence; -    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));      iend -= bytesAdjustment;      if (ip != iend) {          /* Store any last literals */ -        U32 lastLLSize = (U32)(iend - ip); +        U32 const lastLLSize = (U32)(iend - ip);          assert(ip <= iend);          DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);          ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);          seqPos->posInSrc += lastLLSize;      } -    return bytesAdjustment; +    return (size_t)(iend-istart);  } -typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, -                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, -                                       const void* src, size_t blockSize); -static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) +/* @seqPos represents a position within @inSeqs; + * it is read and updated by this function + * once the goal of producing a block of size @blockSize is reached. + * @return: nb of bytes consumed from @src, necessarily <= @blockSize. 
+ */ +typedef size_t (*ZSTD_SequenceCopier_f)(ZSTD_CCtx* cctx, +                                        ZSTD_SequencePosition* seqPos, +                                  const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, +                                  const void* src, size_t blockSize, +                                        ZSTD_ParamSwitch_e externalRepSearch); + +static ZSTD_SequenceCopier_f ZSTD_selectSequenceCopier(ZSTD_SequenceFormat_e mode)  { -    ZSTD_sequenceCopier sequenceCopier = NULL; -    assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode)); +    assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, (int)mode));      if (mode == ZSTD_sf_explicitBlockDelimiters) { -        return ZSTD_copySequencesToSeqStoreExplicitBlockDelim; -    } else if (mode == ZSTD_sf_noBlockDelimiters) { -        return ZSTD_copySequencesToSeqStoreNoBlockDelim; +        return ZSTD_transferSequences_wBlockDelim; +    } +    assert(mode == ZSTD_sf_noBlockDelimiters); +    return ZSTD_transferSequences_noDelim; +} + +/* Discover the size of the next block by searching for the delimiter. + * Note that a block delimiter **must** exist in this mode, + * otherwise it's an input error. + * The retrieved block size will later be compared to ensure it remains within bounds */ +static size_t +blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_SequencePosition seqPos) +{ +    int end = 0; +    size_t blockSize = 0; +    size_t spos = seqPos.idx; +    DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize); +    assert(spos <= inSeqsSize); +    while (spos < inSeqsSize) { +        end = (inSeqs[spos].offset == 0); +        blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; +        if (end) { +            if (inSeqs[spos].matchLength != 0) +                RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0"); +            break; +        } +        spos++; +    } +    if (!end) +        RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter"); +    return blockSize; +} + +static size_t determine_blockSize(ZSTD_SequenceFormat_e mode, +                           size_t blockSize, size_t remaining, +                     const ZSTD_Sequence* inSeqs, size_t inSeqsSize, +                           ZSTD_SequencePosition seqPos) +{ +    DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining); +    if (mode == ZSTD_sf_noBlockDelimiters) { +        /* Note: this is more of a "target" block size */ +        return MIN(remaining, blockSize); +    } +    assert(mode == ZSTD_sf_explicitBlockDelimiters); +    {   size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); +        FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters"); +        if (explicitBlockSize > blockSize) +            RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a block that is too large"); +        if (explicitBlockSize > remaining) +            RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source"); +        return explicitBlockSize;      } -    assert(sequenceCopier != NULL); -    return sequenceCopier;  } -/* Compress, block-by-block, all of the sequences given. +/* Compress all provided sequences, block-by-block.   *   * Returns the cumulative size of all compressed blocks (including their headers),   * otherwise a ZSTD error. 
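For concreteness, a hypothetical sequence array in ZSTD_sf_explicitBlockDelimiters mode: the delimiter is a sequence whose offset and matchLength are both 0, and whose litLength carries the block's trailing literals, so blockSize_explicitDelimiter() sums to the exact block size:

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_Sequence is experimental API */
    #include <zstd.h>

    /* Editorial example: a 2-sequence block covering exactly 30 source bytes. */
    static const ZSTD_Sequence example_block[] = {
        /* offset, litLength, matchLength, rep */
        {  5,  5, 10, 0 },   /* 5 literals, then a 10-byte match at distance 5 */
        {  0, 15,  0, 0 },   /* delimiter: 15 trailing literals, end of block */
    };
    /* blockSize_explicitDelimiter() would report 5+10+15 = 30 bytes here. */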
@@ -5807,15 +6739,12 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,                            const void* src, size_t srcSize)  {      size_t cSize = 0; -    U32 lastBlock; -    size_t blockSize; -    size_t compressedSeqsSize;      size_t remaining = srcSize; -    ZSTD_sequencePosition seqPos = {0, 0, 0}; +    ZSTD_SequencePosition seqPos = {0, 0, 0}; -    BYTE const* ip = (BYTE const*)src; +    const BYTE* ip = (BYTE const*)src;      BYTE* op = (BYTE*)dst; -    ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); +    ZSTD_SequenceCopier_f const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);      DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);      /* Special case: empty frame */ @@ -5829,22 +6758,29 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,      }      while (remaining) { +        size_t compressedSeqsSize;          size_t cBlockSize; -        size_t additionalByteAdjustment; -        lastBlock = remaining <= cctx->blockSize; -        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize; +        size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, +                                        cctx->blockSizeMax, remaining, +                                        inSeqs, inSeqsSize, seqPos); +        U32 const lastBlock = (blockSize == remaining); +        FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size"); +        assert(blockSize <= remaining);          ZSTD_resetSeqStore(&cctx->seqStore); -        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize); -        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize); -        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy"); -        blockSize -= additionalByteAdjustment; +        blockSize = sequenceCopier(cctx, +                                   &seqPos, inSeqs, inSeqsSize, +                                   ip, blockSize, +                                   cctx->appliedParams.searchForExternalRepcodes); +        FORWARD_IF_ERROR(blockSize, "Bad sequence copy");          /* If blocks are too small, emit as a nocompress block */ -        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { +        /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we are adding +         * an additional 1. 
We need to revisit and change this logic to be more consistent */ +        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {              cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);              FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); -            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); +            DEBUGLOG(5, "Block too small (%zu): data remains uncompressed: cSize=%zu", blockSize, cBlockSize);              cSize += cBlockSize;              ip += blockSize;              op += cBlockSize; @@ -5853,35 +6789,36 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,              continue;          } +        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");          compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,                                  &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,                                  &cctx->appliedParams,                                  op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,                                  blockSize, -                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, +                                cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */,                                  cctx->bmi2);          FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); -        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize); +        DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);          if (!cctx->isFirstBlock &&              ZSTD_maybeRLE(&cctx->seqStore) && -            ZSTD_isRLE((BYTE const*)src, srcSize)) { -            /* We don't want to emit our first block as a RLE even if it qualifies because -            * doing so will cause the decoder (cli only) to throw a "should consume all input error." -            * This is only an issue for zstd <= v1.4.3 -            */ +            ZSTD_isRLE(ip, blockSize)) { +            /* Note: don't emit the first block as RLE even if it qualifies because +             * doing so will cause the decoder (cli <= v1.4.3 only) to throw an (invalid) error +             * "should consume all input error." 
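The raw, RLE, and compressed emission paths here all write the standard 3-byte zstd block header; a minimal editorial sketch of its layout, mirroring the cBlockHeader computation further down (bit 0 = last-block flag, bits 1..2 = block type, bits 3..23 = size):

    typedef unsigned U32;
    enum { bt_raw = 0, bt_rle = 1, bt_compressed = 2 };   /* block types, as in zstd */

    /* Pack a zstd block header; the result is written with MEM_writeLE24(). */
    static U32 make_block_header(U32 lastBlock, U32 blockType, U32 blockSize)
    {
        return lastBlock + (blockType << 1) + (blockSize << 3);
    }

    /* e.g. a final compressed block of 1000 bytes:
     *   make_block_header(1, bt_compressed, 1000) == 1 + 4 + 8000 == 8005 */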
+             */              compressedSeqsSize = 1;          }          if (compressedSeqsSize == 0) {              /* ZSTD_noCompressBlock writes the block header as well */              cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); -            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); -            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize); +            FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed"); +            DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize);          } else if (compressedSeqsSize == 1) {              cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock); -            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed"); -            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize); +            FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed"); +            DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize);          } else {              U32 cBlockHeader;              /* Error checking and repcodes update */ @@ -5893,11 +6830,10 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,              cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);              MEM_writeLE24(op, cBlockHeader);              cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; -            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize); +            DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize);          }          cSize += cBlockSize; -        DEBUGLOG(4, "cSize running total: %zu", cSize);          if (lastBlock) {              break; @@ -5908,41 +6844,50 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,              dstCapacity -= cBlockSize;              cctx->isFirstBlock = 0;          } +        DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity);      } +    DEBUGLOG(4, "cSize final total: %zu", cSize);      return cSize;  } -size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity, +size_t ZSTD_compressSequences(ZSTD_CCtx* cctx, +                              void* dst, size_t dstCapacity,                                const ZSTD_Sequence* inSeqs, size_t inSeqsSize,                                const void* src, size_t srcSize)  {      BYTE* op = (BYTE*)dst;      size_t cSize = 0; -    size_t compressedBlocksSize = 0; -    size_t frameHeaderSize = 0;      /* Transparent initialization stage, same as compressStream2() */ -    DEBUGLOG(3, "ZSTD_compressSequences()"); +    DEBUGLOG(4, "ZSTD_compressSequences (nbSeqs=%zu,dstCapacity=%zu)", inSeqsSize, dstCapacity);      assert(cctx != NULL);      FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed"); +      /* Begin writing output, starting with frame header */ -    frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID); -    op += frameHeaderSize; -    dstCapacity -= frameHeaderSize; -    cSize += frameHeaderSize; +    {   size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, +                    &cctx->appliedParams, srcSize, cctx->dictID); +        op += frameHeaderSize; +        assert(frameHeaderSize <= dstCapacity); +        dstCapacity -= frameHeaderSize; +        cSize += frameHeaderSize; +    }      if (cctx->appliedParams.fParams.checksumFlag && srcSize) {          xxh64_update(&cctx->xxhState, src, 
srcSize);      } -    /* cSize includes block header size and compressed sequences size */ -    compressedBlocksSize = ZSTD_compressSequences_internal(cctx, + +    /* Now generate compressed blocks */ +    {   size_t const cBlocksSize = ZSTD_compressSequences_internal(cctx,                                                             op, dstCapacity,                                                             inSeqs, inSeqsSize,                                                             src, srcSize); -    FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!"); -    cSize += compressedBlocksSize; -    dstCapacity -= compressedBlocksSize; +        FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); +        cSize += cBlocksSize; +        assert(cBlocksSize <= dstCapacity); +        dstCapacity -= cBlocksSize; +    } +    /* Complete with frame checksum, if needed */      if (cctx->appliedParams.fParams.checksumFlag) {          U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);          RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); @@ -5951,26 +6896,557 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci          cSize += 4;      } -    DEBUGLOG(3, "Final compressed size: %zu", cSize); +    DEBUGLOG(4, "Final compressed size: %zu", cSize); +    return cSize; +} + + +#if defined(__AVX2__) + +#include <immintrin.h>  /* AVX2 intrinsics */ + +/* + * Convert 2 sequences per iteration, using AVX2 intrinsics: + *   - offset -> offBase = offset + ZSTD_REP_NUM + *   - litLength -> (U16) litLength + *   - matchLength -> (U16)(matchLength - MINMATCH) + *   - rep is ignored + * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]). + * + * At the end, instead of extracting two __m128i, + * we use _mm256_permute4x64_epi64(..., 0xE8) to move lane2 into lane1, + * then store the lower 16 bytes in one go. + * + * @returns 0 on success, with no long length detected + * @returns > 0 if there is one long length (> 65535), + * indicating its position and type. + */ +static size_t convertSequences_noRepcodes( +    SeqDef* dstSeqs, +    const ZSTD_Sequence* inSeqs, +    size_t nbSequences) +{ +    /* +     * addition: +     *   For each 128-bit half: (offset+ZSTD_REP_NUM, litLength+0, matchLength-MINMATCH, rep+0) +     */ +    const __m256i addition = _mm256_setr_epi32( +        ZSTD_REP_NUM, 0, -MINMATCH, 0,    /* for sequence i */ +        ZSTD_REP_NUM, 0, -MINMATCH, 0     /* for sequence i+1 */ +    ); + +    /* limit: check if there is a long length */ +    const __m256i limit = _mm256_set1_epi32(65535); + +    /* +     * shuffle mask for byte-level rearrangement in each 128-bit half: +     * +     * Input layout (after addition) per 128-bit half: +     *   [ offset+ZSTD_REP_NUM (4 bytes) | litLength (4 bytes) | matchLength (4 bytes) | rep (4 bytes) ] +     * We only need: +     *   offBase (4 bytes) = offset+ZSTD_REP_NUM +     *   litLength (2 bytes) = low 2 bytes of litLength +     *   mlBase (2 bytes) = low 2 bytes of (matchLength) +     * => Bytes [0..3, 4..5, 8..9], zero the rest. 
+     */ +    const __m256i mask = _mm256_setr_epi8( +        /* For the lower 128 bits => sequence i */ +         0, 1, 2, 3,       /* offBase */ +         4, 5,             /* litLength (16 bits) */ +         8, 9,             /* matchLength (16 bits) */ +         (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, +         (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, + +        /* For the upper 128 bits => sequence i+1 */ +        16,17,18,19,       /* offBase */ +        20,21,             /* litLength */ +        24,25,             /* matchLength */ +        (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, +        (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80 +    ); + +    /* +     * Next, we'll use _mm256_permute4x64_epi64(vshf, 0xE8). +     * Explanation of 0xE8 = 11101000b => [lane0, lane2, lane2, lane3]. +     * So the lower 128 bits become [lane0, lane2] => combining seq0 and seq1. +     */ +#define PERM_LANE_0X_E8 0xE8  /* [0,2,2,3] in lane indices */ + +    size_t longLen = 0, i = 0; + +    /* AVX permutation depends on the specific definition of target structures */ +    ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); +    ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); +    ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); +    ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); +    ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + +    /* Process 2 sequences per loop iteration */ +    for (; i + 1 < nbSequences; i += 2) { +        /* Load 2 ZSTD_Sequence (32 bytes) */ +        __m256i vin  = _mm256_loadu_si256((const __m256i*)(const void*)&inSeqs[i]); + +        /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} in each 128-bit half */ +        __m256i vadd = _mm256_add_epi32(vin, addition); + +        /* Check for long length */ +        __m256i ll_cmp  = _mm256_cmpgt_epi32(vadd, limit);  /* 0xFFFFFFFF for element > 65535 */ +        int ll_res  = _mm256_movemask_epi8(ll_cmp); + +        /* Shuffle bytes so each half gives us the 8 bytes we need */ +        __m256i vshf = _mm256_shuffle_epi8(vadd, mask); +        /* +         * Now: +         *   Lane0 = seq0's 8 bytes +         *   Lane1 = 0 +         *   Lane2 = seq1's 8 bytes +         *   Lane3 = 0 +         */ + +        /* Permute 64-bit lanes => move Lane2 down into Lane1. */ +        __m256i vperm = _mm256_permute4x64_epi64(vshf, PERM_LANE_0X_E8); +        /* +         * Now the lower 16 bytes (Lane0+Lane1) = [seq0, seq1]. +         * The upper 16 bytes are [Lane2, Lane3] = [seq1, 0], but we won't use them. 
+         */ + +        /* Store only the lower 16 bytes => 2 SeqDef (8 bytes each) */ +        _mm_storeu_si128((__m128i *)(void*)&dstSeqs[i], _mm256_castsi256_si128(vperm)); +        /* +         * This writes out 16 bytes total: +         *   - offset 0..7  => seq0 (offBase, litLength, mlBase) +         *   - offset 8..15 => seq1 (offBase, litLength, mlBase) +         */ + +        /* check (unlikely) long lengths > 65535 +         * indices for lengths correspond to bits [4..7], [8..11], [20..23], [24..27] +         * => combined mask = 0x0FF00FF0 +         */ +        if (UNLIKELY((ll_res & 0x0FF00FF0) != 0)) { +            /* long length detected: let's figure out which one */ +            if (inSeqs[i].matchLength > 65535+MINMATCH) { +                assert(longLen == 0); +                longLen = i + 1; +            } +            if (inSeqs[i].litLength > 65535) { +                assert(longLen == 0); +                longLen = i + nbSequences + 1; +            } +            if (inSeqs[i+1].matchLength > 65535+MINMATCH) { +                assert(longLen == 0); +                longLen = i + 1 + 1; +            } +            if (inSeqs[i+1].litLength > 65535) { +                assert(longLen == 0); +                longLen = i + 1 + nbSequences + 1; +            } +        } +    } + +    /* Handle leftover if @nbSequences is odd */ +    if (i < nbSequences) { +        /* process last sequence */ +        assert(i == nbSequences - 1); +        dstSeqs[i].offBase = OFFSET_TO_OFFBASE(inSeqs[i].offset); +        dstSeqs[i].litLength = (U16)inSeqs[i].litLength; +        dstSeqs[i].mlBase = (U16)(inSeqs[i].matchLength - MINMATCH); +        /* check (unlikely) long lengths > 65535 */ +        if (UNLIKELY(inSeqs[i].matchLength > 65535+MINMATCH)) { +            assert(longLen == 0); +            longLen = i + 1; +        } +        if (UNLIKELY(inSeqs[i].litLength > 65535)) { +            assert(longLen == 0); +            longLen = i + nbSequences + 1; +        } +    } + +    return longLen; +} + +/* the vector implementation could also be ported to SSSE3, + * but since this implementation is targeting modern systems (>= Sapphire Rapids), + * it's not useful to develop and maintain code for older pre-AVX2 platforms */ + +#else /* no AVX2 */ + +static size_t convertSequences_noRepcodes( +    SeqDef* dstSeqs, +    const ZSTD_Sequence* inSeqs, +    size_t nbSequences) +{ +    size_t longLen = 0; +    size_t n; +    for (n=0; n<nbSequences; n++) { +        dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset); +        dstSeqs[n].litLength = (U16)inSeqs[n].litLength; +        dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH); +        /* check for long length > 65535 */ +        if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) { +            assert(longLen == 0); +            longLen = n + 1; +        } +        if (UNLIKELY(inSeqs[n].litLength > 65535)) { +            assert(longLen == 0); +            longLen = n + nbSequences + 1; +        } +    } +    return longLen; +} + +#endif + +/* + * Precondition: Sequences must end on an explicit Block Delimiter + * @return: 0 on success, or an error code. + * Note: Sequence validation functionality has been disabled (removed). + * This helps generate a lean main pipeline, improving performance. + * It may be re-inserted later. 
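The longLen value returned by both variants of convertSequences_noRepcodes() packs a position and a kind into one size_t; an editorial sketch of the decoding rule applied by the caller below (0 = no long length, 1..nbSequences = long matchLength, above that = long litLength):

    #include <stddef.h>

    /* Sketch (not from the patch): unpack the longLen return convention. */
    static void decode_longLen(size_t longLen, size_t nbSequences,
                               size_t* pos, int* isMatchLength)
    {
        if (longLen == 0) { *pos = 0; *isMatchLength = 0; return; }  /* none */
        if (longLen <= nbSequences) {
            *isMatchLength = 1;
            *pos = longLen - 1;                  /* index of the long match */
        } else {
            *isMatchLength = 0;
            *pos = longLen - nbSequences - 1;    /* index of the long literal run */
        }
    }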
+ */ +size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx, +                const ZSTD_Sequence* const inSeqs, size_t nbSequences, +                int repcodeResolution) +{ +    Repcodes_t updatedRepcodes; +    size_t seqNb = 0; + +    DEBUGLOG(5, "ZSTD_convertBlockSequences (nbSequences = %zu)", nbSequences); + +    RETURN_ERROR_IF(nbSequences >= cctx->seqStore.maxNbSeq, externalSequences_invalid, +                    "Not enough memory allocated. Try adjusting ZSTD_c_minMatch."); + +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + +    /* check end condition */ +    assert(nbSequences >= 1); +    assert(inSeqs[nbSequences-1].matchLength == 0); +    assert(inSeqs[nbSequences-1].offset == 0); + +    /* Convert Sequences from public format to internal format */ +    if (!repcodeResolution) { +        size_t const longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences-1); +        cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences-1; +        if (longl) { +            DEBUGLOG(5, "long length"); +            assert(cctx->seqStore.longLengthType == ZSTD_llt_none); +            if (longl <= nbSequences-1) { +                DEBUGLOG(5, "long match length detected at pos %zu", longl-1); +                cctx->seqStore.longLengthType = ZSTD_llt_matchLength; +                cctx->seqStore.longLengthPos = (U32)(longl-1); +            } else { +                DEBUGLOG(5, "long literals length detected at pos %zu", longl-nbSequences); +                assert(longl <= 2* (nbSequences-1)); +                cctx->seqStore.longLengthType = ZSTD_llt_literalLength; +                cctx->seqStore.longLengthPos = (U32)(longl-(nbSequences-1)-1); +            } +        } +    } else { +        for (seqNb = 0; seqNb < nbSequences - 1 ; seqNb++) { +            U32 const litLength = inSeqs[seqNb].litLength; +            U32 const matchLength = inSeqs[seqNb].matchLength; +            U32 const ll0 = (litLength == 0); +            U32 const offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0); + +            DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); +            ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength); +            ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); +        } +    } + +    /* If we skipped repcode search while parsing, we need to update repcodes now */ +    if (!repcodeResolution && nbSequences > 1) { +        U32* const rep = updatedRepcodes.rep; + +        if (nbSequences >= 4) { +            U32 lastSeqIdx = (U32)nbSequences - 2; /* index of last full sequence */ +            rep[2] = inSeqs[lastSeqIdx - 2].offset; +            rep[1] = inSeqs[lastSeqIdx - 1].offset; +            rep[0] = inSeqs[lastSeqIdx].offset; +        } else if (nbSequences == 3) { +            rep[2] = rep[0]; +            rep[1] = inSeqs[0].offset; +            rep[0] = inSeqs[1].offset; +        } else { +            assert(nbSequences == 2); +            rep[2] = rep[1]; +            rep[1] = rep[0]; +            rep[0] = inSeqs[0].offset; +        } +    } + +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); + +    return 0; +} + +#if defined(ZSTD_ARCH_X86_AVX2) + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) +{ +    size_t i; +    __m256i const zeroVec = _mm256_setzero_si256(); +    __m256i sumVec = zeroVec;  /* accumulates match+lit in 32-bit 
lanes */ +    ZSTD_ALIGNED(32) U32 tmp[8];      /* temporary buffer for reduction */ +    size_t mSum = 0, lSum = 0; +    ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + +    /* Process 2 structs (32 bytes) at a time */ +    for (i = 0; i + 2 <= nbSeqs; i += 2) { +        /* Load two consecutive ZSTD_Sequence (8×4 = 32 bytes) */ +        __m256i data     = _mm256_loadu_si256((const __m256i*)(const void*)&seqs[i]); +        /* check end of block signal */ +        __m256i cmp      = _mm256_cmpeq_epi32(data, zeroVec); +        int cmp_res      = _mm256_movemask_epi8(cmp); +        /* indices for match lengths correspond to bits [8..11], [24..27] +         * => combined mask = 0x0F000F00 */ +        ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); +        if (cmp_res & 0x0F000F00) break; +        /* Accumulate in sumVec */ +        sumVec           = _mm256_add_epi32(sumVec, data); +    } + +    /* Horizontal reduction */ +    _mm256_store_si256((__m256i*)tmp, sumVec); +    lSum = tmp[1] + tmp[5]; +    mSum = tmp[2] + tmp[6]; + +    /* Handle the leftover */ +    for (; i < nbSeqs; i++) { +        lSum += seqs[i].litLength; +        mSum += seqs[i].matchLength; +        if (seqs[i].matchLength == 0) break; /* end of block */ +    } + +    if (i==nbSeqs) { +        /* reaching end of sequences: end of block signal was not present */ +        BlockSummary bs; +        bs.nbSequences = ERROR(externalSequences_invalid); +        return bs; +    } +    {   BlockSummary bs; +        bs.nbSequences = i+1; +        bs.blockSize = lSum + mSum; +        bs.litSize = lSum; +        return bs; +    } +} + +#else + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) +{ +    size_t totalMatchSize = 0; +    size_t litSize = 0; +    size_t n; +    assert(seqs); +    for (n=0; n<nbSeqs; n++) { +        totalMatchSize += seqs[n].matchLength; +        litSize += seqs[n].litLength; +        if (seqs[n].matchLength == 0) { +            assert(seqs[n].offset == 0); +            break; +        } +    } +    if (n==nbSeqs) { +        BlockSummary bs; +        bs.nbSequences = ERROR(externalSequences_invalid); +        return bs; +    } +    {   BlockSummary bs; +        bs.nbSequences = n+1; +        bs.blockSize = litSize + totalMatchSize; +        bs.litSize = litSize; +        return bs; +    } +} +#endif + + +static size_t +ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx* cctx, +                                void* dst, size_t dstCapacity, +                          const ZSTD_Sequence* inSeqs, size_t nbSequences, +                          const void* literals, size_t litSize, size_t srcSize) +{ +    size_t remaining = srcSize; +    size_t cSize = 0; +    BYTE* op = (BYTE*)dst; +    int const repcodeResolution = (cctx->appliedParams.searchForExternalRepcodes == ZSTD_ps_enable); +    assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_ps_auto); + +    DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals_internal: nbSeqs=%zu, litSize=%zu", nbSequences, litSize); +    RETURN_ERROR_IF(nbSequences == 0, externalSequences_invalid, "Requires at least 1 end-of-block"); + +    /* Special case: empty frame */ +    if ((nbSequences == 1) && (inSeqs[0].litLength == 0)) { +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1); +        RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "No room for empty frame block header"); +        MEM_writeLE24(op, cBlockHeader24); +        op += ZSTD_blockHeaderSize; +        dstCapacity -= ZSTD_blockHeaderSize; + 
cSize += ZSTD_blockHeaderSize; +    } + +    while (nbSequences) { +        size_t compressedSeqsSize, cBlockSize, conversionStatus; +        BlockSummary const block = ZSTD_get1BlockSummary(inSeqs, nbSequences); +        U32 const lastBlock = (block.nbSequences == nbSequences); +        FORWARD_IF_ERROR(block.nbSequences, "Error while trying to determine nb of sequences for a block"); +        assert(block.nbSequences <= nbSequences); +        RETURN_ERROR_IF(block.litSize > litSize, externalSequences_invalid, "discrepancy: Sequences require more literals than present in buffer"); +        ZSTD_resetSeqStore(&cctx->seqStore); + +        conversionStatus = ZSTD_convertBlockSequences(cctx, +                            inSeqs, block.nbSequences, +                            repcodeResolution); +        FORWARD_IF_ERROR(conversionStatus, "Bad sequence conversion"); +        inSeqs += block.nbSequences; +        nbSequences -= block.nbSequences; +        remaining -= block.blockSize; + +        /* Note: when blockSize is very small, the other variants send it uncompressed. +         * Here, we still send the sequences, because we don't have the original source to send it uncompressed. +         * One could imagine, in theory, reproducing the source from the sequences, +         * but that's complex, costly, and memory intensive, and goes against the objectives of this variant. */ + +        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); + +        compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal( +                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, +                                literals, block.litSize, +                                &cctx->seqStore, +                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, +                                &cctx->appliedParams, +                                cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */, +                                cctx->bmi2); +        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); +        /* note: the spec forbids any compressed block from being larger than the maximum block size */ +        if (compressedSeqsSize > cctx->blockSizeMax) compressedSeqsSize = 0; +        DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); +        litSize -= block.litSize; +        literals = (const char*)literals + block.litSize; + +        /* Note: it is difficult to check the source for an RLE block when only literals are provided, +         * but it could be determined by analyzing the sequences directly */ + +        if (compressedSeqsSize == 0) { +            /* Sending uncompressed blocks is out of reach, because the source is not provided. +             * In theory, one could use the sequences to regenerate the source, like a decompressor, +             * but it's complex, and memory hungry, defeating the purpose of this variant. +             * Current outcome: generate an error code. +             */
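Since this variant cannot fall back to a raw block, a caller should anticipate the error on incompressible input. A hypothetical wrapper sketch (names invented; only ZSTD_isError() and the function introduced by this patch are assumed):

    #define ZSTD_STATIC_LINKING_ONLY   /* experimental API */
    #include <zstd.h>

    static size_t try_compress_seqs_and_lits(ZSTD_CCtx* cctx, void* dst, size_t dstCap,
                                             const ZSTD_Sequence* seqs, size_t nbSeqs,
                                             const void* lits, size_t litSize, size_t litCap,
                                             size_t srcSize)
    {
        size_t const r = ZSTD_compressSequencesAndLiterals(cctx, dst, dstCap,
                                    seqs, nbSeqs, lits, litSize, litCap, srcSize);
        if (ZSTD_isError(r)) {
            /* e.g. fall back to a path that still owns the raw source,
             * such as ZSTD_compress2(), when a block proves incompressible */
        }
        return r;
    }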
+            RETURN_ERROR(cannotProduce_uncompressedBlock, "ZSTD_compressSequencesAndLiterals cannot generate an uncompressed block"); +        } else { +            U32 cBlockHeader; +            assert(compressedSeqsSize > 1); /* no RLE */ +            /* Error checking and repcodes update */ +            ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); +            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) +                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + +            /* Write block header into beginning of block */ +            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); +            MEM_writeLE24(op, cBlockHeader); +            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; +            DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); +        } + +        cSize += cBlockSize; +        op += cBlockSize; +        dstCapacity -= cBlockSize; +        cctx->isFirstBlock = 0; +        DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); + +        if (lastBlock) { +            assert(nbSequences == 0); +            break; +        } +    } + +    RETURN_ERROR_IF(litSize != 0, externalSequences_invalid, "literals must be entirely and exactly consumed"); +    RETURN_ERROR_IF(remaining != 0, externalSequences_invalid, "Sequences must represent a total of exactly srcSize=%zu", srcSize); +    DEBUGLOG(4, "cSize final total: %zu", cSize); +    return cSize; +} + +size_t +ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx, +                    void* dst, size_t dstCapacity, +                    const ZSTD_Sequence* inSeqs, size_t inSeqsSize, +                    const void* literals, size_t litSize, size_t litCapacity, +                    size_t decompressedSize) +{ +    BYTE* op = (BYTE*)dst; +    size_t cSize = 0; + +    /* Transparent initialization stage, same as compressStream2() */ +    DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals (dstCapacity=%zu)", dstCapacity); +    assert(cctx != NULL); +    if (litCapacity < litSize) { +        RETURN_ERROR(workSpace_tooSmall, "literals buffer is not large enough: must be at least 8 bytes larger than litSize (risk of out-of-bounds read)"); +    } +    FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, decompressedSize), "CCtx initialization failed"); + +    if (cctx->appliedParams.blockDelimiters == ZSTD_sf_noBlockDelimiters) { +        RETURN_ERROR(frameParameter_unsupported, "This mode is only compatible with explicit delimiters"); +    } +    if (cctx->appliedParams.validateSequences) { +        RETURN_ERROR(parameter_unsupported, "This mode is not compatible with Sequence validation"); +    } +    if (cctx->appliedParams.fParams.checksumFlag) { +        RETURN_ERROR(frameParameter_unsupported, "this mode is not compatible with frame checksum"); +    } + +    /* Begin writing output, starting with frame header */ +    {   size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, +                    &cctx->appliedParams, decompressedSize, cctx->dictID); +        op += frameHeaderSize; +        assert(frameHeaderSize <= dstCapacity); +        dstCapacity -= frameHeaderSize; +        cSize += frameHeaderSize; +    } + +    /* Now generate compressed blocks */ +    {   size_t const cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx, +                                            op, 
dstCapacity, +                                            inSeqs, inSeqsSize, +                                            literals, litSize, decompressedSize); +        FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); +        cSize += cBlocksSize; +        assert(cBlocksSize <= dstCapacity); +        dstCapacity -= cBlocksSize; +    } + +    DEBUGLOG(4, "Final compressed size: %zu", cSize);      return cSize;  }  /*======   Finalize   ======*/ +static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs) +{ +    const ZSTD_inBuffer nullInput = { NULL, 0, 0 }; +    const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable); +    return stableInput ? zcs->expectedInBuffer : nullInput; +} +  /*! ZSTD_flushStream() :   * @return : amount of data remaining to flush */  size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)  { -    ZSTD_inBuffer input = { NULL, 0, 0 }; +    ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); +    input.size = input.pos; /* do not ingest more input during flush */      return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);  } -  size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)  { -    ZSTD_inBuffer input = { NULL, 0, 0 }; +    ZSTD_inBuffer input = inBuffer_forEndFlush(zcs);      size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); -    FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed"); +    FORWARD_IF_ERROR(remainingToFlush , "ZSTD_compressStream2(,,ZSTD_e_end) failed");      if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */      /* single thread mode : attempt to calculate remaining to flush more precisely */      {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; @@ -6046,7 +7522,7 @@ static void ZSTD_dedicatedDictSearch_revertCParams(      }  } -static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) +static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)  {      switch (mode) {      case ZSTD_cpm_unknown: @@ -6070,8 +7546,8 @@ static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMo   * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.   *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.   *        Use dictSize == 0 for unknown or unused. - *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */ -static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) + *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. 
*/ +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)  {      U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);      U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); @@ -6092,7 +7568,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel,              cp.targetLength = (unsigned)(-clampedCompressionLevel);          }          /* refine parameters based on srcSize & dictSize */ -        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode); +        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto);      }  } @@ -6109,7 +7585,9 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l   *  same idea as ZSTD_getCParams()   * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).   *  Fields of `ZSTD_frameParameters` are set to default values */ -static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) { +static ZSTD_parameters +ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) +{      ZSTD_parameters params;      ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);      DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); @@ -6123,7 +7601,34 @@ static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned lo   *  same idea as ZSTD_getCParams()   * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).   *  Fields of `ZSTD_frameParameters` are set to default values */ -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) +{      if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;      return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);  } + +void ZSTD_registerSequenceProducer( +    ZSTD_CCtx* zc, +    void* extSeqProdState, +    ZSTD_sequenceProducer_F extSeqProdFunc) +{ +    assert(zc != NULL); +    ZSTD_CCtxParams_registerSequenceProducer( +        &zc->requestedParams, extSeqProdState, extSeqProdFunc +    ); +} + +void ZSTD_CCtxParams_registerSequenceProducer( +  ZSTD_CCtx_params* params, +  void* extSeqProdState, +  ZSTD_sequenceProducer_F extSeqProdFunc) +{ +    assert(params != NULL); +    if (extSeqProdFunc != NULL) { +        params->extSeqProdFunc = extSeqProdFunc; +        params->extSeqProdState = extSeqProdState; +    } else { +        params->extSeqProdFunc = NULL; +        params->extSeqProdState = NULL; +    } +}
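To close the loop on ZSTD_registerSequenceProducer(): a minimal external producer sketch, with the parameter list assumed from upstream zstd.h's experimental ZSTD_sequenceProducer_F contract (fill outSeqs, then return the number of sequences written, or the ZSTD_SEQUENCE_PRODUCER_ERROR sentinel on failure):

    #define ZSTD_STATIC_LINKING_ONLY   /* external sequence producer API is experimental */
    #include <zstd.h>

    /* Trivial producer: emit the whole input as literals (no matches).
     * A real producer would search `src` for matches within `windowSize`.
     * Assumes srcSize fits in an unsigned int, as litLength is 32-bit. */
    static size_t simpleSeqProducer(void* state,
                    ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                    const void* src, size_t srcSize,
                    const void* dict, size_t dictSize,
                    int compressionLevel, size_t windowSize)
    {
        (void)state; (void)src; (void)dict; (void)dictSize;
        (void)compressionLevel; (void)windowSize;
        if (outSeqsCapacity < 1) return ZSTD_SEQUENCE_PRODUCER_ERROR;
        outSeqs[0].offset = 0;
        outSeqs[0].litLength = (unsigned)srcSize;
        outSeqs[0].matchLength = 0;
        outSeqs[0].rep = 0;
        return 1;
    }

    /* registration, e.g. after ZSTD_createCCtx():
     *     ZSTD_registerSequenceProducer(cctx, NULL, simpleSeqProducer);
     */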
