#include <limits.h>
#include <stddef.h>
#include <string.h>
#include <stdlib.h>
#include <pthread.h>
#include "../zstd.h"
#include <stdio.h>
#include <time.h>
#include <assert.h>
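For orientation, here is a minimal sketch of the stable one-shot API declared in zstd.h (the only non-libc header above that is part of the public interface). The buffer names and the compression level are illustrative, not taken from this file:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "../zstd.h"

int main(void)
{
    const char src[] = "zstd zstd zstd zstd zstd zstd zstd zstd";
    size_t const srcSize = sizeof(src);

    /* ZSTD_compressBound() gives the worst-case compressed size. */
    size_t const bound = ZSTD_compressBound(srcSize);
    void* const cBuf = malloc(bound);
    void* const dBuf = malloc(srcSize);
    if (!cBuf || !dBuf) return 1;

    size_t const cSize = ZSTD_compress(cBuf, bound, src, srcSize, 3 /* level */);
    if (ZSTD_isError(cSize)) {   /* size_t return values double as error codes */
        fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(cSize));
        return 1;
    }
    size_t const dSize = ZSTD_decompress(dBuf, srcSize, cBuf, cSize);
    if (ZSTD_isError(dSize) || dSize != srcSize || memcmp(src, dBuf, srcSize)) return 1;

    printf("%zu -> %zu bytes, round-trip OK\n", srcSize, cSize);
    free(cBuf); free(dBuf);
    return 0;
}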
Macros
#define | __has_attribute(x) 0 |
#define | __has_builtin(x) 0 |
#define | __has_feature(x) 0 |
#define | _DIVSUFSORT_H 1 |
#define | _FILE_OFFSET_BITS 64 |
#define | _FORCE_HAS_FORMAT_STRING(...) |
#define | _LARGEFILE64_SOURCE |
#define | ADVANCED_SEQS STORED_SEQS |
#define | ALPHABET_SIZE (256) |
#define | assert(condition) ((void)0) /* disable assert (default) */ |
#define | B(name, bit) X(name, f7b, bit) |
#define | BIT0 1 |
#define | BIT1 2 |
#define | BIT4 16 |
#define | BIT5 32 |
#define | BIT6 64 |
#define | BIT7 128 |
#define | BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) |
#define | BITCOST_ACCURACY 8 |
#define | BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) |
#define | BITSTREAM_H_MODULE |
#define | BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2") |
#define | BOUNDCHECK(cParam, val) |
#define | BOUNDED(min, val, max) (MAX(min,MIN(val,max))) |
#define | BUCKET_A(_c0) bucket_A[(_c0)] |
#define | BUCKET_A_SIZE (ALPHABET_SIZE) |
#define | BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)]) |
#define | BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE) |
#define | BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)]) |
#define | BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3) |
#define | C(name, bit) X(name, f1c, bit) |
#define | C(name, bit) X(name, f7c, bit) |
#define | CACHELINE_SIZE 64 |
#define | CHECK_DBOUNDS(p, v) |
#define | CHECK_F(f) { CHECK_V_F(_var_err__, f); } |
#define | CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e |
#define | CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) |
#define | CLAMP_TYPE(cParam, val, type) |
#define | COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */ |
#define | COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2)) |
#define | COPY16(d, s) { ZSTD_copy16(d,s); d+=16; s+=16; } |
#define | COPY8(d, s) { ZSTD_copy8(d,s); d+=8; s+=8; } |
#define | COVER_DEFAULT_SPLITPOINT 1.0 |
#define | COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) |
#define | D(name, bit) X(name, f1d, bit) |
#define | DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4 |
#define | DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT |
#define | DDICT_HASHSET_RESIZE_FACTOR 2 |
#define | DDICT_HASHSET_TABLE_BASE_SIZE 64 |
#define | DEBUG_H_12987983217 |
#define | DEBUG_PRINTHEX(l, p, n) {} |
#define | DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1]) |
#define | DEBUGLEVEL 0 |
#define | DEBUGLOG(l, ...) {} /* disabled */ |
#define | DEFAULT_ACCEL 1 |
#define | DEFAULT_F 20 |
#define | DefaultMaxOff 28 |
#define | DICTLISTSIZE_DEFAULT 10000 |
#define | DISPLAY(...) |
#define | DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } |
#define | DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) |
#define | DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ |
#define | DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) |
#define | DISPLAYUPDATE(l, ...) |
#define | DONT_VECTORIZE |
#define | DYNAMIC_BMI2 0 |
#define | ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE) |
#define | ERR_QUOTE(str) #str |
#define | ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ |
#define | ERROR(name) ZSTD_ERROR(name) |
#define | ERROR_H_MODULE |
#define | FASTCOVER_DEFAULT_SPLITPOINT 0.75 |
#define | FASTCOVER_MAX_ACCEL 10 |
#define | FASTCOVER_MAX_F 31 |
#define | FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) |
#define | FORCE_INLINE_ATTR |
#define | FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR |
#define | FORCE_NOINLINE static |
#define | FORWARD_IF_ERROR(err, ...) |
#define | FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */) |
#define | FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)) |
#define | FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */) |
#define | FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8) |
#define | FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned)) |
#define | FSE_CAT(X, Y) X##Y |
#define | FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ |
#define | FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable)) |
#define | FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2)) |
#define | FSE_DECODE_TYPE FSE_decode_t |
#define | FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned)) |
#define | FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) |
#define | FSE_DEFAULT_MEMORY_USAGE 13 |
#define | FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2) |
#define | FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable)) |
#define | FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog))) |
#define | FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) |
#define | FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s)) |
#define | FSE_FUNCTION_EXTENSION |
#define | FSE_FUNCTION_NAME(X, Y) FSE_CAT(X,Y) |
#define | FSE_FUNCTION_TYPE BYTE |
#define | FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) |
#define | FSE_H |
#define | FSE_H_FSE_STATIC_LINKING_ONLY |
#define | FSE_isError ERR_isError |
#define | FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE |
#define | FSE_MAX_MEMORY_USAGE 14 |
#define | FSE_MAX_SYMBOL_VALUE 255 |
#define | FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) |
#define | FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG) |
#define | FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1) |
#define | FSE_MIN_TABLELOG 5 |
#define | FSE_NCOUNTBOUND 512 |
#define | FSE_PUBLIC_API |
#define | FSE_QUOTE(str) #str |
#define | FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ |
#define | FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ |
#define | FSE_STATIC_LINKING_ONLY |
#define | FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ |
#define | FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ |
#define | FSE_TABLELOG_ABSOLUTE_MAX 15 |
#define | FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3) |
#define | FSE_TYPE_NAME(X, Y) FSE_CAT(X,Y) |
#define | FSE_TYPE_NAME(X, Y) FSE_CAT(X,Y) |
#define | FSE_VERSION_MAJOR 0 |
#define | FSE_VERSION_MINOR 9 |
#define | FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) |
#define | FSE_VERSION_RELEASE 0 |
#define | FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) |
#define | GB *(1U<<30) |
#define | GEAR_ITER_ONCE() |
#define | GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) |
#define | GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) |
#define | GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) |
#define | GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) |
#define | GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) |
#define | GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) |
#define | GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) |
#define | GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) |
#define | GETIDX(a) ((0 <= (a)) ? (a) : (~(a))) |
#define | HASH_READ_SIZE 8 |
#define | HBUFFSIZE 256 /* should prove large enough for all entropy headers */ |
#define | HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR |
#define | HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned)) |
#define | HIST_WKSP_SIZE_U32 1024 |
#define | HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1) |
#define | HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) |
#define | HUF_ASM_DECL HUF_EXTERN_C |
#define | HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) |
#define | HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ |
#define | HUF_BLOCKSIZE_MAX (128 * 1024) |
#define | HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ |
#define | HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */ |
#define | HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } |
#define | HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } |
#define | HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t)) |
#define | HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */ |
#define | HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) |
#define | HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192) |
#define | HUF_CTABLEBOUND 129 |
#define | HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog) |
#define | HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) |
#define | HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) |
#define | HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) |
#define | HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) |
#define | HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) |
#define | HUF_DECODER_FAST_TABLELOG 11 |
#define | HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) |
#define | HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) |
#define | HUF_DGEN(fn) |
#define | HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) |
#define | HUF_EXTERN_C |
#define | HUF_FAST_BMI2_ATTRS |
#define | HUF_H_298734234 |
#define | HUF_isError ERR_isError |
#define | HUF_NEED_BMI2_FUNCTION 0 |
#define | HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra |
#define | HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned)) |
#define | HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1) |
#define | HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ |
#define | HUF_SYMBOLVALUE_MAX 255 |
#define | HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ |
#define | HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ |
#define | HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ |
#define | HUF_WORKSPACE_MAX_ALIGNMENT 8 |
#define | HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) |
#define | HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) |
#define | INLINE __inline |
#define | INLINE_KEYWORD |
#define | JOB_ERROR(e) |
#define | KB *(1 <<10) |
#define | kLazySkippingStep 8 |
#define | kSearchStrength 8 |
#define | LDM_BATCH_SIZE 64 |
#define | LDM_BUCKET_SIZE_LOG 3 |
#define | LDM_HASH_RLOG 7 |
#define | LDM_MIN_MATCH_LENGTH 64 |
#define | LIKELY(x) (x) |
#define | Litbits 8 |
#define | LitHufLog 11 |
#define | LL_DEFAULTNORMLOG 6 /* for static allocation */ |
#define | LLFSELog 9 |
#define | LLIMIT 64 /* heuristic determined experimentally */ |
#define | LOCALDISPLAYLEVEL(displayLevel, l, ...) |
#define | LOCALDISPLAYUPDATE(displayLevel, l, ...) |
#define | LONG_OFFSETS_MAX_EXTRA_BITS_32 |
#define | LONGNBSEQ 0x7F00 |
#define | MAP_EMPTY_VALUE ((U32)-1) |
#define | MAX(a, b) ((a)>(b) ? (a) : (b)) |
#define | MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 |
#define | MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog) |
#define | MaxLit ((1<<Litbits) - 1) |
#define | MaxLL 35 |
#define | MaxLLBits 16 |
#define | MaxML 52 |
#define | MaxMLBits 16 |
#define | MaxOff 31 |
#define | MAXREPOFFSET 1024 |
#define | MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */ |
#define | MB *(1 <<20) |
#define | MEM_H_MODULE |
#define | MEM_MODULE |
#define | MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ |
#define | MERGE_CHECK(a, b, c) |
#define | MIN(a, b) ((a)<(b) ? (a) : (b)) |
#define | MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */ |
#define | MIN_LITERALS_FOR_4_STREAMS 6 |
#define | MIN_SEQUENCES_BLOCK_SPLITTING 300 |
#define | MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ |
#define | MINMATCH 3 |
#define | MINMATCHLENGTH 7 /* heuristic determined experimentally */ |
#define | MINRATIO 4 /* minimum nb of apparition to be selected in dictionary */ |
#define | ML_DEFAULTNORMLOG 6 /* for static allocation */ |
#define | MLFSELog 9 |
#define | NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)] |
#define | NOISELENGTH 32 |
#define | OF_DEFAULTNORMLOG 5 /* for static allocation */ |
#define | OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM) |
#define | OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM) |
#define | OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM) |
#define | OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */ |
#define | OFFCODE_MAX 30 /* only applicable to first block */ |
#define | OffFSELog 8 |
#define | OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM) |
#define | POOL_H |
#define | PREFETCH_AREA(p, s) |
#define | PREFETCH_L1(ptr) (void)(ptr) /* disabled */ |
#define | PREFETCH_L2(ptr) (void)(ptr) /* disabled */ |
#define | PREFIX(name) ZSTD_error_##name |
#define | RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) |
#define | RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) |
#define | RANK_POSITION_MAX_COUNT_LOG 32 |
#define | RANK_POSITION_TABLE_SIZE 192 |
#define | RAWLOG(l, ...) {} /* disabled */ |
#define | REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1) |
#define | REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2) |
#define | REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3) |
#define | REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */ |
#define | RETURN_ERROR(err, ...) |
#define | RETURN_ERROR_IF(cond, err, ...) |
#define | RSYNC_LENGTH 32 |
#define | RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX |
#define | RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG) |
#define | SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers) |
#define | SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log))) |
#define | SS_BLOCKSIZE (1024) |
#define | SS_INSERTIONSORT_THRESHOLD (8) |
#define | SS_MISORT_STACKSIZE (16) |
#define | SS_SMERGE_STACKSIZE (32) |
#define | STACK_POP(_a, _b, _c, _d) |
#define | STACK_POP5(_a, _b, _c, _d, _e) |
#define | STACK_PUSH(_a, _b, _c, _d) |
#define | STACK_PUSH5(_a, _b, _c, _d, _e) |
#define | STACK_SIZE SS_SMERGE_STACKSIZE |
#define | STACK_SIZE TR_STACKSIZE |
#define | STARTNODE (HUF_SYMBOLVALUE_MAX+1) |
#define | STATIC_BMI2 0 |
#define | STORED_SEQS 8 |
#define | STORED_SEQS_MASK (STORED_SEQS-1) |
#define | STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) |
#define | STREAM_ACCUMULATOR_MIN_32 25 |
#define | STREAM_ACCUMULATOR_MIN_64 57 |
#define | SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ |
#define | SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 |
#define | SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 |
#define | SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0) |
#define | TARGET_ATTRIBUTE(target) |
#define | THREADING_H_938743 |
#define | TR_INSERTIONSORT_THRESHOLD (8) |
#define | TR_STACKSIZE (64) |
#define | UNLIKELY(x) (x) |
#define | UNUSED_ATTR |
#define | WEIGHT(stat, opt) ((opt) ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) |
#define | WILDCOPY_OVERLENGTH 32 |
#define | WILDCOPY_VECLEN 16 |
#define | WIN_CDECL |
#define | X(name, r, bit) |
#define | XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) |
#define | XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) |
#define | XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) |
#define | XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) |
#define | XXH128_hash_t XXH_IPREF(XXH128_hash_t) |
#define | XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical) |
#define | XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) |
#define | XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) |
#define | XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) |
#define | XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) |
#define | XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) |
#define | XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) |
Allocates an XXH32_state_t.
#define | XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) |
#define | XXH32_ENDJMP 0 |
#define | XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) |
#define | XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) |
#define | XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) |
#define | XXH32_state_s XXH_IPREF(XXH32_state_s) |
#define | XXH32_state_t XXH_IPREF(XXH32_state_t) |
The opaque state struct for the XXH32 streaming API.
#define | XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) |
#define | XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) |
#define | XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) |
#define | XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) |
#define | XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) |
#define | XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed) |
#define | XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) |
#define | XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) |
#define | XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) |
#define | XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) |
#define | XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) |
#define | XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) |
#define | XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) |
#define | XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) |
#define | XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) |
#define | XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) |
#define | XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) |
#define | XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) |
#define | XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) |
#define | XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) |
#define | XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) |
#define | XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) |
#define | XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) |
#define | XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) |
#define | XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) |
#define | XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) |
#define | XXH3_state_s XXH_IPREF(XXH3_state_s) |
#define | XXH3_state_t XXH_IPREF(XXH3_state_t) |
#define | XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) |
#define | XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) |
#define | XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) |
#define | XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) |
#define | XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) |
#define | XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) |
#define | XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) |
#define | XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) |
#define | XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) |
#define | XXH64_state_s XXH_IPREF(XXH64_state_s) |
#define | XXH64_state_t XXH_IPREF(XXH64_state_t) |
#define | XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) |
#define | XXH_ASSERT(c) ((void)0) |
#define | XXH_CAT(A, B) A##B |
#define | XXH_COMPILER_GUARD(var) ((void)0) |
#define | XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() |
Whether the target is little endian.
#define | XXH_DEBUGLEVEL DEBUGLEVEL |
Sets the debugging level.
#define | XXH_ERROR XXH_IPREF(XXH_ERROR) |
#define | XXH_errorcode XXH_IPREF(XXH_errorcode) |
#define | XXH_FALLTHROUGH |
#define | XXH_FORCE_ALIGN_CHECK 1 |
#define | XXH_FORCE_INLINE static |
#define | XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) |
#define | XXH_get32bits(p) XXH_readLE32_align(p, align) |
#define | XXH_get64bits(p) XXH_readLE64_align(p, align) |
#define | XXH_HAS_ATTRIBUTE(x) __has_attribute(x) |
#define | XXH_HAS_BUILTIN(x) __has_builtin(x) |
#define | XXH_HAS_C_ATTRIBUTE(x) 0 |
#define | XXH_HAS_CPP_ATTRIBUTE(x) 0 |
#define | XXH_IMPLEM_13a8737387 |
#define | XXH_IMPLEMENTATION |
#define | XXH_INLINE_ALL |
#define | XXH_INLINE_ALL_31684351384 |
#define | XXH_IPREF(Id) XXH_NAMESPACE ## Id |
#define | XXH_NAME2(A, B) XXH_CAT(A,B) |
#define | XXH_NAMESPACE ZSTD_ |
#define | XXH_NAMESPACE XXH_INLINE_ |
#define | XXH_NO_INLINE static |
#define | XXH_NO_INLINE_HINTS 0 |
#define | XXH_NO_XXH3 |
#define | XXH_OK XXH_IPREF(XXH_OK) |
#define | XXH_PRIME32_1 0x9E3779B1U |
#define | XXH_PRIME32_2 0x85EBCA77U |
#define | XXH_PRIME32_3 0xC2B2AE3DU |
#define | XXH_PRIME32_4 0x27D4EB2FU |
#define | XXH_PRIME32_5 0x165667B1U |
#define | XXH_PRIME64_1 0x9E3779B185EBCA87ULL |
#define | XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL |
#define | XXH_PRIME64_3 0x165667B19E3779F9ULL |
#define | XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL |
#define | XXH_PRIME64_5 0x27D4EB2F165667C5ULL |
#define | XXH_PRIVATE_API |
#define | XXH_PROCESS1 |
#define | XXH_PROCESS4 |
#define | XXH_PUBLIC_API static |
#define | XXH_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r)))) |
#define | XXH_rotl64(x, r) (((x) << (r)) | ((x) >> (64 - (r)))) |
#define | XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c) |
#define | XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0) |
#define | XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ |
#define | XXH_STATIC_LINKING_ONLY |
#define | XXH_VERSION_MAJOR 0 |
#define | XXH_VERSION_MINOR 8 |
#define | XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) |
#define | XXH_VERSION_RELEASE 1 |
#define | XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) |
#define | XXHASH_H_5627135585666179 1 |
#define | XXHASH_H_STATIC_13879238742 |
#define | ZDICT_CONTENTSIZE_MIN 128 |
#define | ZDICT_DEPRECATED(message) |
#define | ZDICT_DICTSIZE_MIN 256 |
#define | ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) |
#define | ZDICT_MAX_SAMPLES_SIZE (2000U << 20) |
#define | ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO) |
#define | ZDICT_STATIC_LINKING_ONLY |
#define | ZDICTLIB_API ZDICTLIB_VISIBLE |
#define | ZDICTLIB_HIDDEN |
#define | ZDICTLIB_STATIC_API ZDICTLIB_VISIBLE |
#define | ZDICTLIB_VISIBLE |
#define | ZSTD_ADDRESS_SANITIZER 0 |
#define | ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T)) |
#define | ZSTD_ALLOCATIONS_H |
#define | ZSTD_ASM_SUPPORTED 0 |
#define | ZSTD_BITS_H |
#define | ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ |
#define | ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) |
#define | ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls |
#define | ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls |
#define | ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64)) |
#define | ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32)) |
#define | ZSTD_calloc(n, s) calloc((n), (s)) |
#define | ZSTD_CCOMMON_H_MODULE |
#define | ZSTD_CET_ENDBRANCH |
#define | ZSTD_CHUNKSIZE_MAX |
#define | ZSTD_CLEVELS_H |
#define | ZSTD_COMMON_CPU_H |
#define | ZSTD_COMPILER_H |
#define | ZSTD_COMPRESS_ADVANCED_H |
#define | ZSTD_COMPRESS_H |
#define | ZSTD_COMPRESS_HEAPMODE 0 |
#define | ZSTD_COMPRESS_LITERALS_H |
#define | ZSTD_COMPRESS_SEQUENCES_H |
#define | ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX)) |
#define | ZSTD_CWKSP_ALIGNMENT_BYTES 64 |
#define | ZSTD_CWKSP_ASAN_REDZONE_SIZE 128 |
#define | ZSTD_CWKSP_H |
#define | ZSTD_DATAFLOW_SANITIZER 0 |
#define | ZSTD_DDICT_H |
#define | ZSTD_DEC_BLOCK_H |
#define | ZSTD_DECODER_INTERNAL_BUFFER (1 << 16) |
#define | ZSTD_DECOMPRESS_INTERNAL_H |
#define | ZSTD_DEPS_COMMON |
#define | ZSTD_DEPS_MALLOC |
#define | ZSTD_DEPS_MATH64 |
#define | ZSTD_DEPS_NEED_MALLOC |
#define | ZSTD_DEPS_NEED_MATH64 |
#define | ZSTD_DISABLE_ASM 1 |
#define | ZSTD_div64(dividend, divisor) ((dividend) / (divisor)) |
#define | ZSTD_DOUBLE_FAST_H |
#define | ZSTD_DUBT_UNSORTED_MARK |
#define | ZSTD_ENABLE_ASM_X86_64_BMI2 0 |
#define | ZSTD_ERROR(name) ((size_t)-PREFIX(name)) |
#define | ZSTD_ERRORS_H_398273423 |
#define | ZSTD_FALLTHROUGH |
#define | ZSTD_FAST_H |
#define | ZSTD_FOR_EACH_DICT_MODE(X, ...) |
#define | ZSTD_FOR_EACH_MLS(X, dictMode) |
#define | ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) |
#define | ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) |
#define | ZSTD_FRAMECHECKSUMSIZE 4 |
#define | ZSTD_FRAMEIDSIZE 4 /* magic number size */ |
#define | ZSTD_free(p) free((p)) |
#define | ZSTD_GEN_DFAST_FN(dictMode, mls) |
#define | ZSTD_GEN_FAST_FN(dictMode, mls, step) |
#define | ZSTD_HAS_C_ATTRIBUTE(x) 0 |
#define | ZSTD_HAS_CPP_ATTRIBUTE(x) 0 |
#define | ZSTD_HASHLOG3_MAX 17 |
#define | ZSTD_HAVE_WEAK_SYMBOLS 0 |
#define | ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls |
#define | ZSTD_HEAPMODE 1 |
#define | ZSTD_HIDE_ASM_FUNCTION(func) |
#define | ZSTD_HUFFDTABLE_CAPACITY_LOG 12 |
#define | ZSTD_INDEXOVERFLOW_MARGIN (16 MB) |
#define | ZSTD_isError ERR_isError /* for inlining */ |
#define | ZSTD_LAZY_DDSS_BUCKET_LOG 2 |
#define | ZSTD_LAZY_H |
#define | ZSTD_LBMAX (128 << 10) |
#define | ZSTD_LBMIN 64 |
#define | ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT |
#define | ZSTD_LDM_GEARTAB_H |
#define | ZSTD_LDM_H |
#define | ZSTD_LEGACY_SUPPORT 0 |
#define | ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX) |
#define | ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ |
#define | ZSTD_malloc(s) malloc(s) |
#define | ZSTD_MAX_CLEVEL 22 |
#define | ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8) |
#define | ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */ |
#define | ZSTD_MAX_NB_BLOCK_SPLITS 196 |
#define | ZSTD_MAX_PRICE (1<<30) |
#define | ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1) |
#define | ZSTD_memcpy(d, s, l) memcpy((d),(s),(l)) |
#define | ZSTD_memmove(d, s, l) memmove((d),(s),(l)) |
#define | ZSTD_MEMORY_SANITIZER 0 |
#define | ZSTD_memset(p, v, l) memset((p),(v),(l)) |
#define | ZSTD_MULTITHREAD |
#define | ZSTD_NO_CLEVEL 0 |
#define | ZSTD_NO_FORWARD_PROGRESS_MAX 16 |
#define | ZSTD_OPT_H |
#define | ZSTD_OPT_NUM (1<<12) |
#define | ZSTD_PORTABILITY_MACROS_H |
#define | ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ |
#define | ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a)) |
#define | ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a)) |
#define | ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b)) |
#define | ZSTD_pthread_cond_signal(a) pthread_cond_signal((a)) |
#define | ZSTD_pthread_cond_t pthread_cond_t |
#define | ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b)) |
#define | ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) |
#define | ZSTD_pthread_join(a) pthread_join((a),NULL) |
#define | ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a)) |
#define | ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b)) |
#define | ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a)) |
#define | ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) |
#define | ZSTD_pthread_mutex_t pthread_mutex_t |
#define | ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a)) |
#define | ZSTD_pthread_t pthread_t |
#define | ZSTD_REP_NUM 3 /* number of repcodes */ |
#define | ZSTD_RESIZE_SEQPOOL 0 |
#define | ZSTD_ROLL_HASH_CHAR_OFFSET 10 |
#define | ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1) |
#define | ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */ |
#define | ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */ |
#define | ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ |
#define | ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1) |
#define | ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog |
#define | ZSTD_ROWSIZE 16 |
#define | ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE |
#define | ZSTD_SHORT_CACHE_TAG_BITS 8 |
#define | ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1) |
#define | ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) |
#define | ZSTD_STATIC_LINKING_ONLY |
#define | ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ |
#define | ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ |
#define | ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ |
#define | ZSTD_SWITCH_MLS(X, dictMode) |
#define | ZSTD_SWITCH_ROWLOG(dictMode, mls) |
#define | ZSTD_SWITCH_SEARCH_METHOD(dictMode) |
#define | ZSTD_TRACE 0 |
#define | ZSTD_TRACE_H |
#define | ZSTD_UNREACHABLE { assert(0); } |
#define | ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL) |
#define | ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB) |
#define | ZSTD_WEAK_ATTR |
#define | ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0 |
#define | ZSTD_WINDOW_START_INDEX 2 |
#define | ZSTD_WINDOWLOG_ABSOLUTEMIN 10 |
#define | ZSTD_WORKSPACETOOLARGE_FACTOR 3 |
#define | ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 |
#define | ZSTD_ZDICT_H |
#define | ZSTD_ZDICT_H_STATIC |
#define | ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE |
#define | ZSTDERRORLIB_HIDDEN |
#define | ZSTDERRORLIB_VISIBLE |
#define | ZSTDMT_COMPRESS_H |
#define | ZSTDMT_JOBLOG_MAX (MEM_32bits() ? 29 : 30) |
#define | ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB)) |
#define | ZSTDMT_JOBSIZE_MIN (512 KB) |
#define | ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256) |
#define | ZSTDMT_OVERLAPLOG_DEFAULT 0 |
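Two of the macro families above are worth a short sketch: DEBUG_STATIC_ASSERT builds a compile-time check out of an array type (a negative array size is invalid code when the condition is false), and CHECK_F / CHECK_V_F implement early-return error forwarding on top of ERR_isError. The snippet restates those two definitions verbatim; doStep() and the simplified ERR_isError() are hypothetical stand-ins (the real ERR_isError compares against ERROR(maxCode)):

#include <stdio.h>
#include <stddef.h>

/* As defined above: invalid array type when c is false => compile error. */
#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])

/* Hypothetical stand-ins for this sketch only. */
static unsigned ERR_isError(size_t code) { return code > (size_t)-64; }
static size_t doStep(void) { return 42; }

/* As defined above: evaluate f once, forward any error to the caller. */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }

static size_t pipeline(void)
{
    DEBUG_STATIC_ASSERT(sizeof(size_t) >= 4);
    CHECK_F(doStep());       /* returns early if doStep() reports an error */
    CHECK_V_F(n, doStep());  /* keeps the successful result in n */
    return n;
}

int main(void) { return pipeline() == 42 ? 0 : 1; }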
Typedefs
typedef struct buffer_s | buffer_t |
typedef unsigned char | BYTE |
typedef struct COVER_best_s | COVER_best_t |
typedef struct COVER_dictSelection | COVER_dictSelection_t |
typedef struct COVER_map_pair_t_s | COVER_map_pair_t |
typedef struct COVER_map_s | COVER_map_t |
typedef struct COVER_tryParameters_data_s | COVER_tryParameters_data_t |
typedef ZSTD_ErrorCode | ERR_enum |
typedef struct FASTCOVER_tryParameters_data_s | FASTCOVER_tryParameters_data_t |
typedef unsigned | FSE_CTable |
typedef unsigned | FSE_DTable |
typedef size_t | HUF_CElt |
typedef void(* | HUF_DecompressFastLoopFn) (HUF_DecompressFastArgs *) |
typedef size_t(* | HUF_DecompressUsingDTableFn) (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
typedef U32 | HUF_DTable |
typedef nodeElt | huffNodeTable[2 *(HUF_SYMBOLVALUE_MAX+1)] |
typedef struct nodeElt_s | nodeElt |
typedef struct POOL_ctx_s | POOL_ctx |
typedef void(* | POOL_function) (void *) |
typedef struct POOL_job_s | POOL_job |
typedef rankValCol_t | rankVal_t[HUF_TABLELOG_MAX] |
typedef U32 | rankValCol_t[HUF_TABLELOG_MAX+1] |
typedef struct repcodes_s | repcodes_t |
typedef signed short | S16 |
typedef signed int | S32 |
typedef signed long long | S64 |
typedef signed char | S8 |
typedef struct seqDef_s | seqDef |
typedef struct _trbudget_t | trbudget_t |
typedef unsigned short | U16 |
typedef unsigned int | U32 |
typedef unsigned long long | U64 |
typedef unsigned char | U8 |
typedef struct XXH32_state_s | XXH32_state_t |
typedef struct XXH64_state_s | XXH64_state_t |
The opaque state struct for the XXH64 streaming API.
typedef XXH32_hash_t | xxh_u32 |
typedef unsigned char | xxh_u8 |
typedef size_t(* | ZSTD_blockCompressor) (ZSTD_matchState_t *bs, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
typedef size_t(* | ZSTD_decompressSequences_t) (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
typedef U32(* | ZSTD_getAllMatchesFn) (ZSTD_match_t *, ZSTD_matchState_t *, U32 *, const BYTE *, const BYTE *, const U32 rep[ZSTD_REP_NUM], U32 const ll0, U32 const lengthToBeat) |
typedef struct ZSTD_matchState_t | ZSTD_matchState_t |
typedef struct ZSTD_prefixDict_s | ZSTD_prefixDict |
typedef size_t(* | ZSTD_sequenceCopier) (ZSTD_CCtx *cctx, ZSTD_sequencePosition *seqPos, const ZSTD_Sequence *const inSeqs, size_t inSeqsSize, const void *src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch) |
typedef U64 | ZSTD_VecMask |
typedef struct ZSTDMT_bufferPool_s | ZSTDMT_bufferPool |
typedef struct ZSTDMT_CCtx_s | ZSTDMT_CCtx |
typedef ZSTDMT_bufferPool | ZSTDMT_seqPool |
typedef unsigned long long | XXH64_hash_t |
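Several of the typedefs above are function pointers used for runtime dispatch; POOL_function is the simplest. A minimal sketch of how a job payload travels through the void* parameter, with a hypothetical runJob() standing in for the real pool queue:

#include <stdio.h>

/* As listed above: the job signature used by the thread pool. */
typedef void (*POOL_function)(void*);

typedef struct { const char* name; int id; } JobPayload;  /* hypothetical payload */

static void printJob(void* opaque)
{
    JobPayload* const job = (JobPayload*)opaque;
    printf("job %d: %s\n", job->id, job->name);
}

/* A real pool queues (function, opaque) pairs and runs them on workers;
 * this sketch just invokes the pair directly. */
static void runJob(POOL_function fn, void* opaque) { fn(opaque); }

int main(void)
{
    JobPayload p = { "compress block", 1 };
    runJob(printJob, &p);
    return 0;
}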
Functions
static INLINE_KEYWORD UNUSED_ATTR void | _force_has_format_string (const char *format,...) |
static int | allBytesIdentical (const void *src, size_t srcSize) |
MEM_STATIC void | BIT_addBits (BIT_CStream_t *bitC, size_t value, unsigned nbBits) |
MEM_STATIC void | BIT_addBitsFast (BIT_CStream_t *bitC, size_t value, unsigned nbBits) |
MEM_STATIC size_t | BIT_closeCStream (BIT_CStream_t *bitC) |
MEM_STATIC unsigned | BIT_endOfDStream (const BIT_DStream_t *bitD) |
MEM_STATIC void | BIT_flushBits (BIT_CStream_t *bitC) |
MEM_STATIC void | BIT_flushBitsFast (BIT_CStream_t *bitC) |
MEM_STATIC FORCE_INLINE_ATTR size_t | BIT_getLowerBits (size_t bitContainer, U32 const nbBits) |
MEM_STATIC FORCE_INLINE_ATTR size_t | BIT_getMiddleBits (size_t bitContainer, U32 const start, U32 const nbBits) |
MEM_STATIC FORCE_INLINE_ATTR size_t | BIT_getUpperBits (size_t bitContainer, U32 const start) |
MEM_STATIC size_t | BIT_initCStream (BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity) |
MEM_STATIC size_t | BIT_initDStream (BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize) |
MEM_STATIC FORCE_INLINE_ATTR size_t | BIT_lookBits (const BIT_DStream_t *bitD, U32 nbBits) |
MEM_STATIC size_t | BIT_lookBitsFast (const BIT_DStream_t *bitD, U32 nbBits) |
MEM_STATIC size_t | BIT_readBits (BIT_DStream_t *bitD, unsigned nbBits) |
MEM_STATIC size_t | BIT_readBitsFast (BIT_DStream_t *bitD, unsigned nbBits) |
MEM_STATIC BIT_DStream_status | BIT_reloadDStream (BIT_DStream_t *bitD) |
MEM_STATIC BIT_DStream_status | BIT_reloadDStreamFast (BIT_DStream_t *bitD) |
MEM_STATIC FORCE_INLINE_ATTR void | BIT_skipBits (BIT_DStream_t *bitD, U32 nbBits) |
static size_t | blockSize_explicitDelimiter (const ZSTD_Sequence *inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) |
static size_t | blockSize_noDelimiter (size_t blockSize, size_t remaining) |
static rawSeqStore_t | bufferToSeq (buffer_t buffer) |
static int | construct_BWT (const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m) |
static int | construct_BWT_indexes (const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m, unsigned char *num_indexes, int *indexes) |
static void | construct_SA (const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m) |
void | COVER_best_destroy (COVER_best_t *best) |
void | COVER_best_finish (COVER_best_t *best, ZDICT_cover_params_t parameters, COVER_dictSelection_t selection) |
void | COVER_best_init (COVER_best_t *best) |
void | COVER_best_start (COVER_best_t *best) |
void | COVER_best_wait (COVER_best_t *best) |
static size_t | COVER_buildDictionary (const COVER_ctx_t *ctx, U32 *freqs, COVER_map_t *activeDmers, void *dictBuffer, size_t dictBufferCapacity, ZDICT_cover_params_t parameters) |
static int | COVER_checkParameters (ZDICT_cover_params_t parameters, size_t maxDictSize) |
size_t | COVER_checkTotalCompressedSize (const ZDICT_cover_params_t parameters, const size_t *samplesSizes, const BYTE *samples, size_t *offsets, size_t nbTrainSamples, size_t nbSamples, BYTE *const dict, size_t dictBufferCapacity) |
static int | COVER_cmp (COVER_ctx_t *ctx, const void *lp, const void *rp) |
static int | COVER_cmp8 (COVER_ctx_t *ctx, const void *lp, const void *rp) |
COVER_epoch_info_t | COVER_computeEpochs (U32 maxDictSize, U32 nbDmers, U32 k, U32 passes) |
static void | COVER_ctx_destroy (COVER_ctx_t *ctx) |
static size_t | COVER_ctx_init (COVER_ctx_t *ctx, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, unsigned d, double splitPoint) |
COVER_dictSelection_t | COVER_dictSelectionError (size_t error) |
void | COVER_dictSelectionFree (COVER_dictSelection_t selection) |
unsigned | COVER_dictSelectionIsError (COVER_dictSelection_t selection) |
static void | COVER_group (COVER_ctx_t *ctx, const void *group, const void *groupEnd) |
static void | COVER_groupBy (const void *data, size_t count, size_t size, COVER_ctx_t *ctx, int(*cmp)(COVER_ctx_t *, const void *, const void *), void(*grp)(COVER_ctx_t *, const void *, const void *)) |
static const size_t * | COVER_lower_bound (const size_t *first, const size_t *last, size_t value) |
static U32 * | COVER_map_at (COVER_map_t *map, U32 key) |
static void | COVER_map_clear (COVER_map_t *map) |
static void | COVER_map_destroy (COVER_map_t *map) |
static U32 | COVER_map_hash (COVER_map_t *map, U32 key) |
static U32 | COVER_map_index (COVER_map_t *map, U32 key) |
static int | COVER_map_init (COVER_map_t *map, U32 size) |
static void | COVER_map_remove (COVER_map_t *map, U32 key) |
COVER_dictSelection_t | COVER_selectDict (BYTE *customDictContent, size_t dictBufferCapacity, size_t dictContentSize, const BYTE *samplesBuffer, const size_t *samplesSizes, unsigned nbFinalizeSamples, size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t *offsets, size_t totalCompressedSize) |
static COVER_segment_t | COVER_selectSegment (const COVER_ctx_t *ctx, U32 *freqs, COVER_map_t *activeDmers, U32 begin, U32 end, ZDICT_cover_params_t parameters) |
static int WIN_CDECL | COVER_strict_cmp (const void *lp, const void *rp) |
static int WIN_CDECL | COVER_strict_cmp8 (const void *lp, const void *rp) |
size_t | COVER_sum (const size_t *samplesSizes, unsigned nbSamples) |
static void | COVER_tryParameters (void *opaque) |
void | COVER_warnOnSmallCorpus (size_t maxDictSize, size_t nbDmers, int displayLevel) |
static size_t | determine_blockSize (ZSTD_sequenceFormat_e mode, size_t blockSize, size_t remaining, const ZSTD_Sequence *inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) |
int | divbwt (const unsigned char *T, unsigned char *U, int *A, int n, unsigned char *num_indexes, int *indexes, int openMP) |
int | divsufsort (const unsigned char *T, int *SA, int n, int openMP) |
ERR_STATIC ERR_enum | ERR_getErrorCode (size_t code) |
ERR_STATIC const char * | ERR_getErrorName (size_t code)
const char * | ERR_getErrorString (ERR_enum code) |
ERR_STATIC unsigned | ERR_isError (size_t code) |
static size_t | FASTCOVER_buildDictionary (const FASTCOVER_ctx_t *ctx, U32 *freqs, void *dictBuffer, size_t dictBufferCapacity, ZDICT_cover_params_t parameters, U16 *segmentFreqs) |
static int | FASTCOVER_checkParameters (ZDICT_cover_params_t parameters, size_t maxDictSize, unsigned f, unsigned accel) |
static void | FASTCOVER_computeFrequency (U32 *freqs, const FASTCOVER_ctx_t *ctx) |
static void | FASTCOVER_convertToCoverParams (ZDICT_fastCover_params_t fastCoverParams, ZDICT_cover_params_t *coverParams) |
static void | FASTCOVER_convertToFastCoverParams (ZDICT_cover_params_t coverParams, ZDICT_fastCover_params_t *fastCoverParams, unsigned f, unsigned accel) |
static void | FASTCOVER_ctx_destroy (FASTCOVER_ctx_t *ctx) |
static size_t | FASTCOVER_ctx_init (FASTCOVER_ctx_t *ctx, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, unsigned d, double splitPoint, unsigned f, FASTCOVER_accel_t accelParams) |
static size_t | FASTCOVER_hashPtrToIndex (const void *p, U32 f, unsigned d) |
static COVER_segment_t | FASTCOVER_selectSegment (const FASTCOVER_ctx_t *ctx, U32 *freqs, U32 begin, U32 end, ZDICT_cover_params_t parameters, U16 *segmentFreqs) |
static void | FASTCOVER_tryParameters (void *opaque) |
static syncPoint_t | findSynchronizationPoint (ZSTDMT_CCtx const *mtctx, ZSTD_inBuffer const input) |
MEM_STATIC U32 | FSE_bitCost (const void *symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog) |
FSE_PUBLIC_API size_t | FSE_buildCTable (FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) |
size_t | FSE_buildCTable_rle (FSE_CTable *ct, unsigned char symbolValue) |
size_t | FSE_buildCTable_wksp (FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize) |
static size_t | FSE_buildDTable_internal (FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize) |
FSE_PUBLIC_API size_t | FSE_buildDTable_wksp (FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize) |
FSE_PUBLIC_API size_t | FSE_compress_usingCTable (void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct) |
static size_t | FSE_compress_usingCTable_generic (void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast) |
FSE_PUBLIC_API size_t | FSE_compressBound (size_t size) |
static unsigned char | FSE_decodeSymbol (FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) |
static unsigned char | FSE_decodeSymbolFast (FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) |
FORCE_INLINE_TEMPLATE size_t | FSE_decompress_usingDTable_generic (void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt, const unsigned fast) |
size_t | FSE_decompress_wksp_bmi2 (void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workSpace, size_t wkspSize, int bmi2) |
FORCE_INLINE_TEMPLATE size_t | FSE_decompress_wksp_body (void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workSpace, size_t wkspSize, int bmi2) |
static size_t | FSE_decompress_wksp_body_default (void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workSpace, size_t wkspSize) |
static void | FSE_encodeSymbol (BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol) |
static unsigned | FSE_endOfDState (const FSE_DState_t *DStatePtr) |
static void | FSE_flushCState (BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr) |
FSE_PUBLIC_API const char * | FSE_getErrorName (size_t code)
MEM_STATIC U32 | FSE_getMaxNbBits (const void *symbolTTPtr, U32 symbolValue) |
static void | FSE_initCState (FSE_CState_t *CStatePtr, const FSE_CTable *ct) |
MEM_STATIC void | FSE_initCState2 (FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol) |
static void | FSE_initDState (FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt) |
FSE_PUBLIC_API unsigned | FSE_isError (size_t code) |
static unsigned | FSE_minTableLog (size_t srcSize, unsigned maxSymbolValue) |
FSE_PUBLIC_API size_t | FSE_NCountWriteBound (unsigned maxSymbolValue, unsigned tableLog) |
FSE_PUBLIC_API size_t | FSE_normalizeCount (short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount) |
static size_t | FSE_normalizeM2 (short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue, short lowProbCount) |
FSE_PUBLIC_API unsigned | FSE_optimalTableLog (unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) |
unsigned | FSE_optimalTableLog_internal (unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) |
MEM_STATIC BYTE | FSE_peekSymbol (const FSE_DState_t *DStatePtr) |
FSE_PUBLIC_API size_t | FSE_readNCount (short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize) |
FSE_PUBLIC_API size_t | FSE_readNCount_bmi2 (short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize, int bmi2) |
FORCE_INLINE_TEMPLATE size_t | FSE_readNCount_body (short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize) |
static size_t | FSE_readNCount_body_default (short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize) |
MEM_STATIC void | FSE_updateState (FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) |
FSE_PUBLIC_API unsigned | FSE_versionNumber (void) |
FSE_PUBLIC_API size_t | FSE_writeNCount (void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) |
static size_t | FSE_writeNCount_generic (void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, unsigned writeIsSafe) |
size_t | HIST_count (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize) |
static size_t | HIST_count_parallel_wksp (unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, HIST_checkInput_e check, U32 *const workSpace) |
unsigned | HIST_count_simple (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize) |
size_t | HIST_count_wksp (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, void *workSpace, size_t workSpaceSize) |
size_t | HIST_countFast (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize) |
size_t | HIST_countFast_wksp (unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, void *workSpace, size_t workSpaceSize) |
unsigned | HIST_isError (size_t code) |
FORCE_INLINE_TEMPLATE void | HUF_addBits (HUF_CStream_t *bitC, HUF_CElt elt, int idx, int kFast) |
static void * | HUF_alignUpWorkspace (void *workspace, size_t *workspaceSizePtr, size_t align) |
size_t | HUF_buildCTable_wksp (HUF_CElt *tree, const unsigned *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize) |
static void | HUF_buildCTableFromTree (HUF_CElt *CTable, nodeElt const *huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) |
static HUF_DEltX2 | HUF_buildDEltX2 (U32 symbol, U32 nbBits, U32 baseSeq, int level) |
static U32 | HUF_buildDEltX2U32 (U32 symbol, U32 nbBits, U32 baseSeq, int level) |
static U64 | HUF_buildDEltX2U64 (U32 symbol, U32 nbBits, U16 baseSeq, int level) |
static int | HUF_buildTree (nodeElt *huffNode, U32 maxSymbolValue) |
unsigned | HUF_cardinality (const unsigned *count, unsigned maxSymbolValue) |
static size_t | HUF_closeCStream (HUF_CStream_t *bitC) |
size_t | HUF_compress1X_repeat (void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int flags) |
size_t | HUF_compress1X_usingCTable (void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable, int flags) |
static size_t | HUF_compress1X_usingCTable_internal (void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable, const int flags) |
FORCE_INLINE_TEMPLATE size_t | HUF_compress1X_usingCTable_internal_body (void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable) |
FORCE_INLINE_TEMPLATE void | HUF_compress1X_usingCTable_internal_body_loop (HUF_CStream_t *bitC, const BYTE *ip, size_t srcSize, const HUF_CElt *ct, int kUnroll, int kFastFlush, int kLastFast) |
size_t | HUF_compress4X_repeat (void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int flags) |
size_t | HUF_compress4X_usingCTable (void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable, int flags) |
static size_t | HUF_compress4X_usingCTable_internal (void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable, int flags) |
static size_t | HUF_compress_internal (void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int flags) |
size_t | HUF_compressBound (size_t size) |
static size_t | HUF_compressCTable_internal (BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, HUF_nbStreams_e nbStreams, const HUF_CElt *CTable, const int flags) |
static size_t | HUF_compressWeights (void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize) |
FORCE_INLINE_TEMPLATE U32 | HUF_decodeLastSymbolX2 (void *op, BIT_DStream_t *DStream, const HUF_DEltX2 *dt, const U32 dtLog) |
HINT_INLINE size_t | HUF_decodeStreamX1 (BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX1 *const dt, const U32 dtLog) |
HINT_INLINE size_t | HUF_decodeStreamX2 (BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog) |
FORCE_INLINE_TEMPLATE BYTE | HUF_decodeSymbolX1 (BIT_DStream_t *Dstream, const HUF_DEltX1 *dt, const U32 dtLog) |
FORCE_INLINE_TEMPLATE U32 | HUF_decodeSymbolX2 (void *op, BIT_DStream_t *DStream, const HUF_DEltX2 *dt, const U32 dtLog) |
size_t | HUF_decompress1X1_DCtx_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags) |
FORCE_INLINE_TEMPLATE size_t | HUF_decompress1X1_usingDTable_internal_body (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
size_t | HUF_decompress1X2_DCtx_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags) |
FORCE_INLINE_TEMPLATE size_t | HUF_decompress1X2_usingDTable_internal_body (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
size_t | HUF_decompress1X_DCtx_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags) |
size_t | HUF_decompress1X_usingDTable (void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable, int flags) |
static size_t | HUF_decompress4X1_DCtx_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags) |
static size_t | HUF_decompress4X1_usingDTable_internal (void *dst, size_t dstSize, void const *cSrc, size_t cSrcSize, HUF_DTable const *DTable, int flags) |
FORCE_INLINE_TEMPLATE size_t | HUF_decompress4X1_usingDTable_internal_body (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
static size_t | HUF_decompress4X1_usingDTable_internal_default (void *dst, size_t dstSize, void const *cSrc, size_t cSrcSize, HUF_DTable const *DTable) |
static HUF_FAST_BMI2_ATTRS size_t | HUF_decompress4X1_usingDTable_internal_fast (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable, HUF_DecompressFastLoopFn loopFn) |
static HUF_FAST_BMI2_ATTRS void | HUF_decompress4X1_usingDTable_internal_fast_c_loop (HUF_DecompressFastArgs *args) |
static size_t | HUF_decompress4X2_DCtx_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags) |
static size_t | HUF_decompress4X2_usingDTable_internal (void *dst, size_t dstSize, void const *cSrc, size_t cSrcSize, HUF_DTable const *DTable, int flags) |
FORCE_INLINE_TEMPLATE size_t | HUF_decompress4X2_usingDTable_internal_body (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
static size_t | HUF_decompress4X2_usingDTable_internal_default (void *dst, size_t dstSize, void const *cSrc, size_t cSrcSize, HUF_DTable const *DTable) |
static HUF_FAST_BMI2_ATTRS size_t | HUF_decompress4X2_usingDTable_internal_fast (void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable, HUF_DecompressFastLoopFn loopFn) |
static HUF_FAST_BMI2_ATTRS void | HUF_decompress4X2_usingDTable_internal_fast_c_loop (HUF_DecompressFastArgs *args) |
size_t | HUF_decompress4X_hufOnly_wksp (HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workSpace, size_t wkspSize, int flags) |
size_t | HUF_decompress4X_usingDTable (void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable, int flags) |
static size_t | HUF_DecompressFastArgs_init (HUF_DecompressFastArgs *args, void *dst, size_t dstSize, void const *src, size_t srcSize, const HUF_DTable *DTable) |
static U64 | HUF_DEltX1_set4 (BYTE symbol, BYTE nbBits) |
FORCE_INLINE_TEMPLATE void | HUF_encodeSymbol (HUF_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable, int idx, int fast) |
static HUF_CElt | HUF_endMark (void) |
size_t | HUF_estimateCompressedSize (const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue) |
static void | HUF_fillDTableX2 (HUF_DEltX2 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 *rankStart, rankValCol_t *rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) |
static void | HUF_fillDTableX2ForWeight (HUF_DEltX2 *DTableRank, sortedSymbol_t const *begin, sortedSymbol_t const *end, U32 nbBits, U32 tableLog, U16 baseSeq, int const level) |
static void | HUF_fillDTableX2Level2 (HUF_DEltX2 *DTable, U32 targetLog, const U32 consumedBits, const U32 *rankVal, const int minWeight, const int maxWeight1, const sortedSymbol_t *sortedSymbols, U32 const *rankStart, U32 nbBitsBaseline, U16 baseSeq) |
FORCE_INLINE_TEMPLATE void | HUF_flushBits (HUF_CStream_t *bitC, int kFast) |
static DTableDesc | HUF_getDTableDesc (const HUF_DTable *table) |
const char * | HUF_getErrorName (size_t code) |
static U32 | HUF_getIndex (U32 const count) |
static size_t | HUF_getNbBits (HUF_CElt elt) |
static size_t | HUF_getNbBitsFast (HUF_CElt elt) |
U32 | HUF_getNbBitsFromCTable (const HUF_CElt *symbolTable, U32 symbolValue) |
static size_t | HUF_getValue (HUF_CElt elt) |
static size_t | HUF_getValueFast (HUF_CElt elt) |
static size_t | HUF_initCStream (HUF_CStream_t *bitC, void *startPtr, size_t dstCapacity) |
static size_t | HUF_initFastDStream (BYTE const *ip) |
static size_t | HUF_initRemainingDStream (BIT_DStream_t *bit, HUF_DecompressFastArgs const *args, int stream, BYTE *segmentEnd) |
HINT_INLINE void | HUF_insertionSort (nodeElt huffNode[], int const low, int const high) |
unsigned | HUF_isError (size_t code) |
MEM_STATIC int | HUF_isSorted (nodeElt huffNode[], U32 const maxSymbolValue1) |
FORCE_INLINE_TEMPLATE void | HUF_mergeIndex1 (HUF_CStream_t *bitC) |
unsigned | HUF_minTableLog (unsigned symbolCardinality) |
unsigned | HUF_optimalTableLog (unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void *workSpace, size_t wkspSize, HUF_CElt *table, const unsigned *count, int flags) |
static int | HUF_quickSortPartition (nodeElt arr[], int const low, int const high) |
size_t | HUF_readCTable (HUF_CElt *CTable, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *hasZeroWeights) |
size_t | HUF_readDTableX1_wksp (HUF_DTable *DTable, const void *src, size_t srcSize, void *workSpace, size_t wkspSize, int flags) |
size_t | HUF_readDTableX2_wksp (HUF_DTable *DTable, const void *src, size_t srcSize, void *workSpace, size_t wkspSize, int flags) |
size_t | HUF_readStats (BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize) |
FORCE_INLINE_TEMPLATE size_t | HUF_readStats_body (BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workSpace, size_t wkspSize, int bmi2) |
static size_t | HUF_readStats_body_default (BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workSpace, size_t wkspSize) |
size_t | HUF_readStats_wksp (BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t wkspSize, int flags) |
static U32 | HUF_rescaleStats (BYTE *huffWeight, U32 *rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog) |
U32 | HUF_selectDecoder (size_t dstSize, size_t cSrcSize) |
static U32 | HUF_setMaxHeight (nodeElt *huffNode, U32 lastNonNull, U32 targetNbBits) |
static void | HUF_setNbBits (HUF_CElt *elt, size_t nbBits) |
static void | HUF_setValue (HUF_CElt *elt, size_t value) |
static void | HUF_simpleQuickSort (nodeElt arr[], int low, int high) |
static void | HUF_sort (nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) |
static void | HUF_swapNodes (nodeElt *a, nodeElt *b) |
static size_t | HUF_tightCompressBound (size_t srcSize, size_t tableLog) |
int | HUF_validateCTable (const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue) |
size_t | HUF_writeCTable_wksp (void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize) |
FORCE_INLINE_TEMPLATE void | HUF_zeroIndex1 (HUF_CStream_t *bitC) |
static ZSTD_inBuffer | inBuffer_forEndFlush (const ZSTD_CStream *zcs) |
static int | isIncluded (const void *in, const void *container, size_t length) |
static int | isQueueFull (POOL_ctx const *ctx) |
static rawSeq | maybeSplitSequence (rawSeqStore_t *rawSeqStore, U32 const remaining, U32 const minMatch) |
MEM_STATIC unsigned | MEM_32bits (void) |
MEM_STATIC unsigned | MEM_64bits (void) |
MEM_STATIC void | MEM_check (void) |
MEM_STATIC unsigned | MEM_isLittleEndian (void) |
MEM_STATIC U16 | MEM_read16 (const void *memPtr) |
MEM_STATIC U32 | MEM_read32 (const void *memPtr) |
MEM_STATIC U64 | MEM_read64 (const void *memPtr) |
MEM_STATIC U32 | MEM_readBE32 (const void *memPtr) |
MEM_STATIC U64 | MEM_readBE64 (const void *memPtr) |
MEM_STATIC size_t | MEM_readBEST (const void *memPtr) |
MEM_STATIC U16 | MEM_readLE16 (const void *memPtr) |
MEM_STATIC U32 | MEM_readLE24 (const void *memPtr) |
MEM_STATIC U32 | MEM_readLE32 (const void *memPtr) |
MEM_STATIC U64 | MEM_readLE64 (const void *memPtr) |
MEM_STATIC size_t | MEM_readLEST (const void *memPtr) |
MEM_STATIC size_t | MEM_readST (const void *memPtr) |
MEM_STATIC U32 | MEM_swap32 (U32 in) |
MEM_STATIC U32 | MEM_swap32_fallback (U32 in) |
MEM_STATIC U64 | MEM_swap64 (U64 in) |
MEM_STATIC U64 | MEM_swap64_fallback (U64 in) |
MEM_STATIC size_t | MEM_swapST (size_t in) |
MEM_STATIC void | MEM_write16 (void *memPtr, U16 value) |
MEM_STATIC void | MEM_write32 (void *memPtr, U32 value) |
MEM_STATIC void | MEM_write64 (void *memPtr, U64 value) |
MEM_STATIC void | MEM_writeBE32 (void *memPtr, U32 val32) |
MEM_STATIC void | MEM_writeBE64 (void *memPtr, U64 val64) |
MEM_STATIC void | MEM_writeBEST (void *memPtr, size_t val) |
MEM_STATIC void | MEM_writeLE16 (void *memPtr, U16 val) |
MEM_STATIC void | MEM_writeLE24 (void *memPtr, U32 val) |
MEM_STATIC void | MEM_writeLE32 (void *memPtr, U32 val32) |
MEM_STATIC void | MEM_writeLE64 (void *memPtr, U64 val64) |
MEM_STATIC void | MEM_writeLEST (void *memPtr, size_t val) |
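The MEM_* helpers above are zstd's internal, alignment-safe and endianness-aware loads and stores. The following is a minimal sketch of the underlying technique only (a memcpy-based load followed by a conditional byteswap); it is illustrative and not zstd's exact implementation:

    #include <stdint.h>
    #include <string.h>

    /* Sketch of the idea behind MEM_readLE32(): a memcpy-based load
     * (safe on platforms that fault on unaligned access), followed by
     * a byteswap when the host is big-endian. Illustrative only. */
    static uint32_t readLE32_sketch(const void *memPtr)
    {
        uint32_t v;
        memcpy(&v, memPtr, sizeof v);          /* alignment-safe load */
    #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
        v = ((v >> 24) & 0x000000FFu) | ((v >>  8) & 0x0000FF00u)
          | ((v <<  8) & 0x00FF0000u) | ((v << 24) & 0xFF000000u);
    #endif
        return v;
    }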
void | POOL_add (POOL_ctx *ctx, POOL_function function, void *opaque) |
static void | POOL_add_internal (POOL_ctx *ctx, POOL_function function, void *opaque) |
POOL_ctx * | POOL_create (size_t numThreads, size_t queueSize) |
POOL_ctx * | POOL_create_advanced (size_t numThreads, size_t queueSize, ZSTD_customMem customMem) |
void | POOL_free (POOL_ctx *ctx) |
static void | POOL_join (POOL_ctx *ctx) |
void | POOL_joinJobs (POOL_ctx *ctx) |
int | POOL_resize (POOL_ctx *ctx, size_t numThreads) |
static int | POOL_resize_internal (POOL_ctx *ctx, size_t numThreads) |
size_t | POOL_sizeof (const POOL_ctx *ctx) |
static void * | POOL_thread (void *opaque) |
int | POOL_tryAdd (POOL_ctx *ctx, POOL_function function, void *opaque) |
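The POOL_* entries form zstd's internal thread pool (lib/common/pool.h); they are not part of the public zstd.h API. A hedged usage sketch, assuming the internal header is on the include path and the library sources are linked in:

    #include <stdio.h>
    #include "pool.h"   /* assumption: include path reaches lib/common */

    static void sayHello(void *opaque)       /* matches POOL_function */
    {
        printf("job %d\n", *(int *)opaque);
    }

    int runJobs(void)
    {
        int ids[4] = { 0, 1, 2, 3 };
        POOL_ctx *pool = POOL_create(2 /* threads */, 8 /* queue size */);
        if (pool == NULL) return 1;
        for (int i = 0; i < 4; i++)
            POOL_add(pool, sayHello, &ids[i]); /* blocks if the queue is full */
        POOL_free(pool);                       /* joins workers, then frees */
        return 0;
    }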
static size_t | readSkippableFrameSize (void const *src, size_t srcSize) |
static buffer_t | seqToBuffer (rawSeqStore_t seq) |
static COVER_dictSelection_t | setDictSelection (BYTE *buf, size_t s, size_t csz) |
static int | sort_typeBstar (const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int openMP) |
static INLINE void | ss_blockswap (int *a, int *b, int n) |
static INLINE int | ss_compare (const unsigned char *T, const int *p1, const int *p2, int depth) |
static void | ss_inplacemerge (const unsigned char *T, const int *PA, int *first, int *middle, int *last, int depth) |
static void | ss_insertionsort (const unsigned char *T, const int *PA, int *first, int *last, int depth) |
static INLINE int | ss_isqrt (int x) |
static void | ss_mergebackward (const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int depth) |
static void | ss_mergeforward (const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int depth) |
static INLINE void | ss_rotate (int *first, int *middle, int *last) |
static void | ss_swapmerge (const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int bufsize, int depth) |
static void | sssort (const unsigned char *T, const int *PA, int *first, int *last, int *buf, int bufsize, int depth, int n, int lastsuffix) |
static U32 | sum_u32 (const unsigned table[], size_t nbElts) |
static void | tr_copy (int *ISA, const int *SA, int *first, int *a, int *b, int *last, int depth) |
static INLINE void | tr_fixdown (const int *ISAd, int *SA, int i, int size) |
static void | tr_heapsort (const int *ISAd, int *SA, int size) |
static INLINE int | tr_ilg (int n) |
static void | tr_insertionsort (const int *ISAd, int *first, int *last) |
static void | tr_introsort (int *ISA, const int *ISAd, int *SA, int *first, int *last, trbudget_t *budget) |
static INLINE int * | tr_median3 (const int *ISAd, int *v1, int *v2, int *v3) |
static INLINE int * | tr_median5 (const int *ISAd, int *v1, int *v2, int *v3, int *v4, int *v5) |
static void | tr_partialcopy (int *ISA, const int *SA, int *first, int *a, int *b, int *last, int depth) |
static INLINE void | tr_partition (const int *ISAd, int *first, int *middle, int *last, int **pa, int **pb, int v) |
static INLINE int * | tr_pivot (const int *ISAd, int *first, int *last) |
static INLINE int | trbudget_check (trbudget_t *budget, int size) |
static INLINE void | trbudget_init (trbudget_t *budget, int chance, int incval) |
static void | trsort (int *ISA, int *SA, int n, int depth) |
static void | writeBlockHeader (void *op, size_t cSize, size_t blockSize, U32 lastBlock) |
XXH_PUBLIC_API XXH32_hash_t | XXH32 (const void *input, size_t length, XXH32_hash_t seed) |
Calculates the 32-bit hash of input using xxHash32. |
static xxh_u32 | XXH32_avalanche (xxh_u32 h32) |
XXH_PUBLIC_API void | XXH32_canonicalFromHash (XXH32_canonical_t *dst, XXH32_hash_t hash) |
Converts an XXH32_hash_t to a big endian XXH32_canonical_t. |
XXH_PUBLIC_API void | XXH32_copyState (XXH32_state_t *dst_state, const XXH32_state_t *src_state) |
Copies one XXH32_state_t to another. |
XXH_PUBLIC_API XXH32_hash_t | XXH32_digest (const XXH32_state_t *statePtr) |
Returns the calculated hash value from an XXH32_state_t. |
XXH_FORCE_INLINE xxh_u32 | XXH32_endian_align (const xxh_u8 *input, size_t len, xxh_u32 seed, XXH_alignment align) |
static xxh_u32 | XXH32_finalize (xxh_u32 h32, const xxh_u8 *ptr, size_t len, XXH_alignment align) |
XXH_PUBLIC_API XXH_errorcode | XXH32_freeState (XXH32_state_t *statePtr) |
Frees an XXH32_state_t. |
XXH_PUBLIC_API XXH32_hash_t | XXH32_hashFromCanonical (const XXH32_canonical_t *src) |
Converts an XXH32_canonical_t to a native XXH32_hash_t. |
XXH_PUBLIC_API XXH_errorcode | XXH32_reset (XXH32_state_t *statePtr, XXH32_hash_t seed) |
Resets an XXH32_state_t to begin a new hash. |
static xxh_u32 | XXH32_round (xxh_u32 acc, xxh_u32 input) |
XXH_PUBLIC_API XXH_errorcode | XXH32_update (XXH32_state_t *statePtr, const void *input, size_t length) |
Consumes a block of input to an XXH32_state_t. |
XXH_PUBLIC_API XXH64_hash_t | XXH64 (const void *input, size_t length, XXH64_hash_t seed) |
Calculates the 64-bit hash of input using xxHash64. |
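XXH32() and XXH64() take an arbitrary byte range plus a seed and return the hash directly. A minimal one-shot example, assuming xxhash.h is on the include path:

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main(void)
    {
        const char *msg = "hello zstd";
        XXH32_hash_t const h32 = XXH32(msg, strlen(msg), 0 /* seed */);
        XXH64_hash_t const h64 = XXH64(msg, strlen(msg), 0 /* seed */);
        printf("XXH32=%08x XXH64=%016llx\n",
               (unsigned)h32, (unsigned long long)h64);
        return 0;
    }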
static xxh_u64 | XXH64_avalanche (xxh_u64 h64) |
XXH_PUBLIC_API void | XXH64_canonicalFromHash (XXH64_canonical_t *dst, XXH64_hash_t hash) |
XXH_PUBLIC_API void | XXH64_copyState (XXH64_state_t *dst_state, const XXH64_state_t *src_state) |
XXH_PUBLIC_API XXH64_hash_t | XXH64_digest (const XXH64_state_t *statePtr) |
XXH_FORCE_INLINE xxh_u64 | XXH64_endian_align (const xxh_u8 *input, size_t len, xxh_u64 seed, XXH_alignment align) |
static xxh_u64 | XXH64_finalize (xxh_u64 h64, const xxh_u8 *ptr, size_t len, XXH_alignment align) |
XXH_PUBLIC_API XXH_errorcode | XXH64_freeState (XXH64_state_t *statePtr) |
XXH_PUBLIC_API XXH64_hash_t | XXH64_hashFromCanonical (const XXH64_canonical_t *src) |
static xxh_u64 | XXH64_mergeRound (xxh_u64 acc, xxh_u64 val) |
XXH_PUBLIC_API XXH_errorcode | XXH64_reset (XXH64_state_t *statePtr, XXH64_hash_t seed) |
static xxh_u64 | XXH64_round (xxh_u64 acc, xxh_u64 input) |
XXH_PUBLIC_API XXH_errorcode | XXH64_update (XXH64_state_t *statePtr, const void *input, size_t length) |
static void | XXH_free (void *p) |
static int | XXH_isLittleEndian (void) |
static void * | XXH_malloc (size_t s) |
static void * | XXH_memcpy (void *dest, const void *src, size_t size) |
static xxh_u32 | XXH_read32 (const void *memPtr) |
static xxh_u32 | XXH_readBE32 (const void *ptr) |
XXH_FORCE_INLINE xxh_u32 | XXH_readLE32 (const void *ptr) |
XXH_FORCE_INLINE xxh_u32 | XXH_readLE32_align (const void *ptr, XXH_alignment align) |
static xxh_u32 | XXH_swap32 (xxh_u32 x) |
ZDICTLIB_STATIC_API size_t | ZDICT_addEntropyTablesFromBuffer (void *dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples) |
static size_t | ZDICT_addEntropyTablesFromBuffer_advanced (void *dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_params_t params) |
static size_t | ZDICT_analyzeEntropy (void *dstBuffer, size_t maxDstSize, int compressionLevel, const void *srcBuffer, const size_t *fileSizes, unsigned nbFiles, const void *dictBuffer, size_t dictBufferSize, unsigned notificationLevel) |
static dictItem | ZDICT_analyzePos (BYTE *doneMarks, const int *suffix, U32 start, const void *buffer, U32 minRatio, U32 notificationLevel) |
static clock_t | ZDICT_clockSpan (clock_t nPrevious) |
static size_t | ZDICT_count (const void *pIn, const void *pMatch) |
static void | ZDICT_countEStats (EStats_ress_t esr, const ZSTD_parameters *params, unsigned *countLit, unsigned *offsetcodeCount, unsigned *matchlengthCount, unsigned *litlengthCount, U32 *repOffsets, const void *src, size_t srcSize, U32 notificationLevel) |
static U32 | ZDICT_dictSize (const dictItem *dictList) |
static void | ZDICT_fillNoise (void *buffer, size_t length) |
ZDICTLIB_API size_t | ZDICT_finalizeDictionary (void *dstDictBuffer, size_t maxDictSize, const void *dictContent, size_t dictContentSize, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_params_t parameters) |
static void | ZDICT_flatLit (unsigned *countLit) |
ZDICTLIB_API size_t | ZDICT_getDictHeaderSize (const void *dictBuffer, size_t dictSize) |
ZDICTLIB_API unsigned | ZDICT_getDictID (const void *dictBuffer, size_t dictSize) |
ZDICTLIB_API const char * | ZDICT_getErrorName (size_t errorCode) |
static void | ZDICT_initDictItem (dictItem *d) |
static void | ZDICT_insertDictItem (dictItem *table, U32 maxSize, dictItem elt, const void *buffer) |
static void | ZDICT_insertSortCount (offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count) |
ZDICTLIB_API unsigned | ZDICT_isError (size_t errorCode) |
static U32 | ZDICT_maxRep (U32 const reps[ZSTD_REP_NUM]) |
ZDICTLIB_STATIC_API size_t | ZDICT_optimizeTrainFromBuffer_cover (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t *parameters) |
ZDICTLIB_STATIC_API size_t | ZDICT_optimizeTrainFromBuffer_fastCover (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t *parameters) |
static void | ZDICT_printHex (const void *ptr, size_t length) |
static void | ZDICT_removeDictItem (dictItem *table, U32 id) |
static size_t | ZDICT_totalSampleSize (const size_t *fileSizes, unsigned nbFiles) |
static size_t | ZDICT_trainBuffer_legacy (dictItem *dictList, U32 dictListSize, const void *const buffer, size_t bufferSize, const size_t *fileSizes, unsigned nbFiles, unsigned minRatio, U32 notificationLevel) |
ZDICTLIB_API size_t | ZDICT_trainFromBuffer (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples) |
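ZDICT_trainFromBuffer() expects all training samples concatenated back-to-back, with per-sample sizes in a parallel array, and returns the dictionary size or an error code. A hedged sketch (buffer and function names are illustrative):

    #include <stdio.h>
    #include "zdict.h"

    size_t trainDict(void *dictBuf, size_t dictCap,
                     const void *samples, const size_t *sampleSizes,
                     unsigned nbSamples)
    {
        /* samples = all samples concatenated; sampleSizes[i] = size of sample i */
        size_t const dictSize = ZDICT_trainFromBuffer(dictBuf, dictCap,
                                                      samples, sampleSizes,
                                                      nbSamples);
        if (ZDICT_isError(dictSize)) {
            fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
            return 0;
        }
        return dictSize;   /* number of bytes actually written into dictBuf */
    }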
ZDICTLIB_STATIC_API size_t | ZDICT_trainFromBuffer_cover (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters) |
ZDICTLIB_STATIC_API size_t | ZDICT_trainFromBuffer_fastCover (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters) |
ZDICTLIB_STATIC_API size_t | ZDICT_trainFromBuffer_legacy (void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters) |
static size_t | ZDICT_trainFromBuffer_unsafe_legacy (void *dictBuffer, size_t maxDictSize, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t params) |
static U32 | ZDICT_tryMerge (dictItem *table, dictItem elt, U32 eltNbToSkip, const void *buffer) |
ZSTD_compressionParameters | ZSTD_adjustCParams (ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) |
static ZSTD_compressionParameters | ZSTD_adjustCParams_internal (ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize, ZSTD_cParamMode_e mode, ZSTD_paramSwitch_e useRowMatchFinder) |
static void | ZSTD_advanceHashSalt (ZSTD_matchState_t *ms) |
static int | ZSTD_allocateChainTable (const ZSTD_strategy strategy, const ZSTD_paramSwitch_e useRowMatchFinder, const U32 forDDSDict) |
static void | ZSTD_allocateLiteralsBuffer (ZSTD_DCtx *dctx, void *const dst, const size_t dstCapacity, const size_t litSize, const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately) |
static void | ZSTD_assertEqualCParams (ZSTD_compressionParameters cParams1, ZSTD_compressionParameters cParams2) |
static U64 | ZSTD_bitmix (U64 val, U64 len) |
MEM_STATIC U32 | ZSTD_bitWeight (U32 stat) |
static int | ZSTD_blockSplitterEnabled (ZSTD_CCtx_params *cctxParams) |
static void | ZSTD_blockState_confirmRepcodesAndEntropyTables (ZSTD_blockState_t *const bs) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_BtFindBestMatch (ZSTD_matchState_t *ms, const BYTE *const ip, const BYTE *const iLimit, size_t *offBasePtr, const U32 mls, const ZSTD_dictMode_e dictMode) |
FORCE_INLINE_TEMPLATE U32 | ZSTD_btGetAllMatches_internal (ZSTD_match_t *matches, ZSTD_matchState_t *ms, U32 *nextToUpdate3, const BYTE *ip, const BYTE *const iHighLimit, const U32 rep[ZSTD_REP_NUM], U32 const ll0, U32 const lengthToBeat, const ZSTD_dictMode_e dictMode, const U32 mls) |
size_t | ZSTD_buildBlockEntropyStats (const seqStore_t *seqStorePtr, const ZSTD_entropyCTables_t *prevEntropy, ZSTD_entropyCTables_t *nextEntropy, const ZSTD_CCtx_params *cctxParams, ZSTD_entropyCTablesMetadata_t *entropyMetadata, void *workspace, size_t wkspSize) |
static size_t | ZSTD_buildBlockEntropyStats_literals (void *const src, size_t srcSize, const ZSTD_hufCTables_t *prevHuf, ZSTD_hufCTables_t *nextHuf, ZSTD_hufCTablesMetadata_t *hufMetadata, const int literalsCompressionIsDisabled, void *workspace, size_t wkspSize, int hufFlags) |
static size_t | ZSTD_buildBlockEntropyStats_sequences (const seqStore_t *seqStorePtr, const ZSTD_fseCTables_t *prevEntropy, ZSTD_fseCTables_t *nextEntropy, const ZSTD_CCtx_params *cctxParams, ZSTD_fseCTablesMetadata_t *fseMetadata, void *workspace, size_t wkspSize) |
size_t | ZSTD_buildCTable (void *dst, size_t dstCapacity, FSE_CTable *nextCTable, U32 FSELog, symbolEncodingType_e type, unsigned *count, U32 max, const BYTE *codeTable, size_t nbSeq, const S16 *defaultNorm, U32 defaultNormLog, U32 defaultMax, const FSE_CTable *prevCTable, size_t prevCTableSize, void *entropyWorkspace, size_t entropyWorkspaceSize) |
static ZSTD_symbolEncodingTypeStats_t | ZSTD_buildDummySequencesStatistics (ZSTD_fseCTables_t *nextEntropy) |
static size_t | ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize (seqStore_t *seqStore, ZSTD_CCtx *zc) |
void | ZSTD_buildFSETable (ZSTD_seqSymbol *dt, const short *normalizedCounter, unsigned maxSymbolValue, const U32 *baseValue, const U8 *nbAdditionalBits, unsigned tableLog, void *wksp, size_t wkspSize, int bmi2) |
FORCE_INLINE_TEMPLATE void | ZSTD_buildFSETable_body (ZSTD_seqSymbol *dt, const short *normalizedCounter, unsigned maxSymbolValue, const U32 *baseValue, const U8 *nbAdditionalBits, unsigned tableLog, void *wksp, size_t wkspSize) |
static void | ZSTD_buildFSETable_body_default (ZSTD_seqSymbol *dt, const short *normalizedCounter, unsigned maxSymbolValue, const U32 *baseValue, const U8 *nbAdditionalBits, unsigned tableLog, void *wksp, size_t wkspSize) |
static size_t | ZSTD_buildSeqStore (ZSTD_CCtx *zc, const void *src, size_t srcSize) |
static size_t | ZSTD_buildSeqTable (ZSTD_seqSymbol *DTableSpace, const ZSTD_seqSymbol **DTablePtr, symbolEncodingType_e type, unsigned max, U32 maxLog, const void *src, size_t srcSize, const U32 *baseValue, const U8 *nbAdditionalBits, const ZSTD_seqSymbol *defaultTable, U32 flagRepeatTable, int ddictIsCold, int nbSeq, U32 *wksp, size_t wkspSize, int bmi2) |
static void | ZSTD_buildSeqTable_rle (ZSTD_seqSymbol *dt, U32 baseValue, U8 nbAddBits) |
static ZSTD_symbolEncodingTypeStats_t | ZSTD_buildSequencesStatistics (const seqStore_t *seqStorePtr, size_t nbSeq, const ZSTD_fseCTables_t *prevEntropy, ZSTD_fseCTables_t *nextEntropy, BYTE *dst, const BYTE *const dstEnd, ZSTD_strategy strategy, unsigned *countWorkspace, void *entropyWorkspace, size_t entropyWkspSize) |
size_t | ZSTD_CCtx_getParameter (ZSTD_CCtx const *cctx, ZSTD_cParameter param, int *value) |
static size_t | ZSTD_CCtx_init_compressStream2 (ZSTD_CCtx *cctx, ZSTD_EndDirective endOp, size_t inSize) |
size_t | ZSTD_CCtx_loadDictionary (ZSTD_CCtx *cctx, const void *dict, size_t dictSize) |
size_t | ZSTD_CCtx_loadDictionary_advanced (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) |
size_t | ZSTD_CCtx_loadDictionary_byReference (ZSTD_CCtx *cctx, const void *dict, size_t dictSize) |
size_t | ZSTD_CCtx_refCDict (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict) |
size_t | ZSTD_CCtx_refPrefix (ZSTD_CCtx *cctx, const void *prefix, size_t prefixSize) |
size_t | ZSTD_CCtx_refPrefix_advanced (ZSTD_CCtx *cctx, const void *prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) |
size_t | ZSTD_CCtx_refThreadPool (ZSTD_CCtx *cctx, ZSTD_threadPool *pool) |
size_t | ZSTD_CCtx_reset (ZSTD_CCtx *cctx, ZSTD_ResetDirective reset) |
size_t | ZSTD_CCtx_setCParams (ZSTD_CCtx *cctx, ZSTD_compressionParameters cparams) |
size_t | ZSTD_CCtx_setFParams (ZSTD_CCtx *cctx, ZSTD_frameParameters fparams) |
size_t | ZSTD_CCtx_setParameter (ZSTD_CCtx *cctx, ZSTD_cParameter param, int value) |
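ZSTD_CCtx_setParameter() is the entry point of the advanced API: parameters are set on the context and then picked up by ZSTD_compress2() or ZSTD_compressStream2(). A minimal sketch:

    #include <stdio.h>
    #include "zstd.h"

    size_t compressAdvanced(void *dst, size_t dstCap,
                            const void *src, size_t srcSize)
    {
        ZSTD_CCtx *cctx = ZSTD_createCCtx();
        if (cctx == NULL) return 0;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1); /* xxhash checksum */
        {   size_t const cSize = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
            ZSTD_freeCCtx(cctx);
            if (ZSTD_isError(cSize)) {
                fprintf(stderr, "%s\n", ZSTD_getErrorName(cSize));
                return 0;
            }
            return cSize;
        }
    }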
size_t | ZSTD_CCtx_setParametersUsingCCtxParams (ZSTD_CCtx *cctx, const ZSTD_CCtx_params *params) |
size_t | ZSTD_CCtx_setParams (ZSTD_CCtx *cctx, ZSTD_parameters params) |
size_t | ZSTD_CCtx_setPledgedSrcSize (ZSTD_CCtx *cctx, unsigned long long pledgedSrcSize) |
void | ZSTD_CCtx_trace (ZSTD_CCtx *cctx, size_t extraCSize) |
size_t | ZSTD_CCtxParams_getParameter (ZSTD_CCtx_params const *CCtxParams, ZSTD_cParameter param, int *value) |
size_t | ZSTD_CCtxParams_init (ZSTD_CCtx_params *cctxParams, int compressionLevel) |
size_t | ZSTD_CCtxParams_init_advanced (ZSTD_CCtx_params *cctxParams, ZSTD_parameters params) |
static void | ZSTD_CCtxParams_init_internal (ZSTD_CCtx_params *cctxParams, const ZSTD_parameters *params, int compressionLevel) |
size_t | ZSTD_CCtxParams_reset (ZSTD_CCtx_params *params) |
size_t | ZSTD_CCtxParams_setParameter (ZSTD_CCtx_params *CCtxParams, ZSTD_cParameter param, int value) |
static void | ZSTD_CCtxParams_setZstdParams (ZSTD_CCtx_params *cctxParams, const ZSTD_parameters *params) |
static int | ZSTD_CDictIndicesAreTagged (const ZSTD_compressionParameters *const cParams) |
static size_t | ZSTD_checkBufferStability (ZSTD_CCtx const *cctx, ZSTD_outBuffer const *output, ZSTD_inBuffer const *input, ZSTD_EndDirective endOp) |
void | ZSTD_checkContinuity (ZSTD_DCtx *dctx, const void *dst, size_t dstSize) |
size_t | ZSTD_checkCParams (ZSTD_compressionParameters cParams) |
MEM_STATIC void | ZSTD_checkDictValidity (const ZSTD_window_t *window, const void *blockEnd, U32 maxDist, U32 *loadedDictEndPtr, const ZSTD_matchState_t **dictMatchStatePtr) |
static size_t | ZSTD_checkOutBuffer (ZSTD_DStream const *zds, ZSTD_outBuffer const *output) |
static ZSTD_compressionParameters | ZSTD_clampCParams (ZSTD_compressionParameters cParams) |
static void | ZSTD_clearAllDicts (ZSTD_CCtx *cctx) |
static void | ZSTD_clearDict (ZSTD_DCtx *dctx) |
MEM_STATIC int | ZSTD_comparePackedTags (size_t packedTag1, size_t packedTag2) |
size_t | ZSTD_compress (void *dst, size_t dstCapacity, const void *src, size_t srcSize, int compressionLevel) |
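A minimal one-shot use of ZSTD_compress(), sizing the destination with ZSTD_compressBound() and checking the result with ZSTD_isError() (illustrative):

    #include <stdlib.h>
    #include "zstd.h"

    /* Returns a malloc'd buffer holding the compressed frame, or NULL. */
    void *compressSimple(const void *src, size_t srcSize, size_t *cSizePtr)
    {
        size_t const bound = ZSTD_compressBound(srcSize);
        void *dst = malloc(bound);
        if (dst == NULL) return NULL;
        {   size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3);
            if (ZSTD_isError(cSize)) { free(dst); return NULL; }
            *cSizePtr = cSize;
        }
        return dst;
    }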
size_t | ZSTD_compress2 (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
size_t | ZSTD_compress_advanced (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, ZSTD_parameters params) |
size_t | ZSTD_compress_advanced_internal (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, const ZSTD_CCtx_params *params) |
static size_t | ZSTD_compress_frameChunk (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk) |
static size_t | ZSTD_compress_insertDictionary (ZSTD_compressedBlockState_t *bs, ZSTD_matchState_t *ms, ldmState_t *ls, ZSTD_cwksp *ws, const ZSTD_CCtx_params *params, const void *dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp, void *workspace) |
size_t | ZSTD_compress_usingCDict (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict) |
size_t | ZSTD_compress_usingCDict_advanced (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict, ZSTD_frameParameters fParams) |
static size_t | ZSTD_compress_usingCDict_internal (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict, ZSTD_frameParameters fParams) |
size_t | ZSTD_compress_usingDict (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, int compressionLevel) |
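When many small inputs share one dictionary, digesting it once into a ZSTD_CDict and reusing it via ZSTD_compress_usingCDict() amortizes the dictionary-loading cost. A hedged sketch (a real application would cache the CDict rather than rebuild it per call):

    #include "zstd.h"

    size_t compressWithCDict(ZSTD_CCtx *cctx,
                             void *dst, size_t dstCap,
                             const void *src, size_t srcSize,
                             const void *dictBuf, size_t dictSize)
    {
        ZSTD_CDict *cdict = ZSTD_createCDict(dictBuf, dictSize, 3 /* level */);
        size_t cSize;
        if (cdict == NULL) return 0;
        cSize = ZSTD_compress_usingCDict(cctx, dst, dstCap, src, srcSize, cdict);
        ZSTD_freeCDict(cdict);   /* keep the CDict alive and reuse it in real code */
        return ZSTD_isError(cSize) ? 0 : cSize;
    }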
size_t | ZSTD_compressBegin (ZSTD_CCtx *cctx, int compressionLevel) |
size_t | ZSTD_compressBegin_advanced (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) |
size_t | ZSTD_compressBegin_advanced_internal (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict *cdict, const ZSTD_CCtx_params *params, unsigned long long pledgedSrcSize) |
static size_t | ZSTD_compressBegin_internal (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict *cdict, const ZSTD_CCtx_params *params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) |
size_t | ZSTD_compressBegin_usingCDict (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict) |
size_t | ZSTD_compressBegin_usingCDict_advanced (ZSTD_CCtx *const cctx, const ZSTD_CDict *const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) |
size_t | ZSTD_compressBegin_usingCDict_deprecated (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict) |
static size_t | ZSTD_compressBegin_usingCDict_internal (ZSTD_CCtx *const cctx, const ZSTD_CDict *const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) |
size_t | ZSTD_compressBegin_usingDict (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel) |
static size_t | ZSTD_compressBegin_usingDict_deprecated (ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel) |
size_t | ZSTD_compressBlock (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btlazy2 (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btlazy2_dictMatchState (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btlazy2_extDict (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btopt (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btopt_dictMatchState (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btopt_extDict (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btultra (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btultra2 (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btultra_dictMatchState (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_btultra_extDict (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_deprecated (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
size_t | ZSTD_compressBlock_doubleFast (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_doubleFast_dictMatchState (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_compressBlock_doubleFast_dictMatchState_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls) |
size_t | ZSTD_compressBlock_doubleFast_extDict (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
static size_t | ZSTD_compressBlock_doubleFast_extDict_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_compressBlock_doubleFast_noDict_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls) |
size_t | ZSTD_compressBlock_fast (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_fast_dictMatchState (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_compressBlock_fast_dictMatchState_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls, U32 const hasStep) |
size_t | ZSTD_compressBlock_fast_extDict (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
static size_t | ZSTD_compressBlock_fast_extDict_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls, U32 const hasStep) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_compressBlock_fast_noDict_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize, U32 const mls, U32 const hasStep) |
size_t | ZSTD_compressBlock_greedy (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_greedy_dedicatedDictSearch (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_greedy_dedicatedDictSearch_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_greedy_dictMatchState (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_greedy_dictMatchState_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_greedy_extDict (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_greedy_extDict_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_greedy_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
static size_t | ZSTD_compressBlock_internal (ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame) |
size_t | ZSTD_compressBlock_lazy (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy2 (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy2_dedicatedDictSearch (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy2_dedicatedDictSearch_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy2_dictMatchState (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy2_dictMatchState_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy2_extDict (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy2_extDict_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy2_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy_dedicatedDictSearch (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy_dedicatedDictSearch_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy_dictMatchState (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy_dictMatchState_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
size_t | ZSTD_compressBlock_lazy_extDict (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_compressBlock_lazy_extDict_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth) |
size_t | ZSTD_compressBlock_lazy_extDict_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_compressBlock_lazy_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth, ZSTD_dictMode_e const dictMode) |
size_t | ZSTD_compressBlock_lazy_row (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], void const *src, size_t srcSize) |
static size_t | ZSTD_compressBlock_opt0 (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize, const ZSTD_dictMode_e dictMode) |
static size_t | ZSTD_compressBlock_opt2 (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize, const ZSTD_dictMode_e dictMode) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_compressBlock_opt_generic (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize, const int optLevel, const ZSTD_dictMode_e dictMode) |
static size_t | ZSTD_compressBlock_splitBlock (ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastBlock) |
static size_t | ZSTD_compressBlock_splitBlock_internal (ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t blockSize, U32 lastBlock, U32 nbSeq) |
static size_t | ZSTD_compressBlock_targetCBlockSize (ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastBlock) |
static size_t | ZSTD_compressBlock_targetCBlockSize_body (ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const size_t bss, U32 lastBlock) |
size_t | ZSTD_compressBound (size_t srcSize) |
size_t | ZSTD_compressCCtx (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, int compressionLevel) |
size_t | ZSTD_compressContinue (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
static size_t | ZSTD_compressContinue_internal (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk) |
size_t | ZSTD_compressContinue_public (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
static int | ZSTD_compressedLiterals (optState_t const *const optPtr) |
size_t | ZSTD_compressEnd (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
size_t | ZSTD_compressEnd_public (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
size_t | ZSTD_compressLiterals (void *dst, size_t dstCapacity, const void *src, size_t srcSize, void *entropyWorkspace, size_t entropyWorkspaceSize, const ZSTD_hufCTables_t *prevHuf, ZSTD_hufCTables_t *nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, int suspectUncompressible, int bmi2) |
size_t | ZSTD_compressRleLiteralsBlock (void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
static size_t | ZSTD_compressSeqStore_singleBlock (ZSTD_CCtx *zc, const seqStore_t *const seqStore, repcodes_t *const dRep, repcodes_t *const cRep, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastBlock, U32 isPartition) |
size_t | ZSTD_compressSequences (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const ZSTD_Sequence *inSeqs, size_t inSeqsSize, const void *src, size_t srcSize) |
static size_t | ZSTD_compressSequences_internal (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const ZSTD_Sequence *inSeqs, size_t inSeqsSize, const void *src, size_t srcSize) |
size_t | ZSTD_compressStream (ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input) |
size_t | ZSTD_compressStream2 (ZSTD_CCtx *cctx, ZSTD_outBuffer *output, ZSTD_inBuffer *input, ZSTD_EndDirective endOp) |
size_t | ZSTD_compressStream2_simpleArgs (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, size_t *dstPos, const void *src, size_t srcSize, size_t *srcPos, ZSTD_EndDirective endOp) |
static size_t | ZSTD_compressStream_generic (ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input, ZSTD_EndDirective const flushMode) |
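ZSTD_compressStream2() drives the streaming compressor: chunks are fed with ZSTD_e_continue, and the frame is finished with ZSTD_e_end, which must be repeated until it returns 0. A sketch assuming inBuf/outBuf were sized with ZSTD_CStreamInSize()/ZSTD_CStreamOutSize():

    #include <stdio.h>
    #include "zstd.h"

    int compressStream(ZSTD_CCtx *cctx, FILE *fin, FILE *fout,
                       void *inBuf, size_t inCap, void *outBuf, size_t outCap)
    {
        for (;;) {
            size_t const nRead = fread(inBuf, 1, inCap, fin);
            int const lastChunk = (nRead < inCap);
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, nRead, 0 };
            int finished;
            do {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                if (ZSTD_isError(remaining)) return -1;
                fwrite(outBuf, 1, output.pos, fout);
                /* done when the chunk is consumed (continue) or the frame
                 * epilogue is fully flushed (end, i.e. remaining == 0) */
                finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
            } while (!finished);
            if (lastChunk) return 0;
        }
    }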
static size_t | ZSTD_compressSubBlock (const ZSTD_entropyCTables_t *entropy, const ZSTD_entropyCTablesMetadata_t *entropyMetadata, const seqDef *sequences, size_t nbSeq, const BYTE *literals, size_t litSize, const BYTE *llCode, const BYTE *mlCode, const BYTE *ofCode, const ZSTD_CCtx_params *cctxParams, void *dst, size_t dstCapacity, const int bmi2, int writeLitEntropy, int writeSeqEntropy, int *litEntropyWritten, int *seqEntropyWritten, U32 lastBlock) |
static size_t | ZSTD_compressSubBlock_literal (const HUF_CElt *hufTable, const ZSTD_hufCTablesMetadata_t *hufMetadata, const BYTE *literals, size_t litSize, void *dst, size_t dstSize, const int bmi2, int writeEntropy, int *entropyWritten) |
static size_t | ZSTD_compressSubBlock_multi (const seqStore_t *seqStorePtr, const ZSTD_compressedBlockState_t *prevCBlock, ZSTD_compressedBlockState_t *nextCBlock, const ZSTD_entropyCTablesMetadata_t *entropyMetadata, const ZSTD_CCtx_params *cctxParams, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const int bmi2, U32 lastBlock, void *workspace, size_t wkspSize) |
static size_t | ZSTD_compressSubBlock_sequences (const ZSTD_fseCTables_t *fseTables, const ZSTD_fseCTablesMetadata_t *fseMetadata, const seqDef *sequences, size_t nbSeq, const BYTE *llCode, const BYTE *mlCode, const BYTE *ofCode, const ZSTD_CCtx_params *cctxParams, void *dst, size_t dstCapacity, const int bmi2, int writeEntropy, int *entropyWritten) |
size_t | ZSTD_compressSuperBlock (ZSTD_CCtx *zc, void *dst, size_t dstCapacity, void const *src, size_t srcSize, unsigned lastBlock) |
static void | ZSTD_copy16 (void *dst, const void *src) |
static void | ZSTD_copy4 (void *dst, const void *src) |
static void | ZSTD_copy8 (void *dst, const void *src) |
static void | ZSTD_copyBlockSequences (ZSTD_CCtx *zc) |
size_t | ZSTD_copyCCtx (ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize) |
static size_t | ZSTD_copyCCtx_internal (ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, ZSTD_frameParameters fParams, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) |
static void | ZSTD_copyCDictTableIntoCCtx (U32 *dst, U32 const *src, size_t tableSize, ZSTD_compressionParameters const *cParams) |
void | ZSTD_copyDCtx (ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx) |
void | ZSTD_copyDDictParameters (ZSTD_DCtx *dctx, const ZSTD_DDict *ddict) |
static size_t | ZSTD_copyRawBlock (void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
size_t | ZSTD_copySequencesToSeqStoreExplicitBlockDelim (ZSTD_CCtx *cctx, ZSTD_sequencePosition *seqPos, const ZSTD_Sequence *const inSeqs, size_t inSeqsSize, const void *src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch) |
size_t | ZSTD_copySequencesToSeqStoreNoBlockDelim (ZSTD_CCtx *cctx, ZSTD_sequencePosition *seqPos, const ZSTD_Sequence *const inSeqs, size_t inSeqsSize, const void *src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch) |
MEM_STATIC size_t | ZSTD_count (const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit) |
MEM_STATIC size_t | ZSTD_count_2segments (const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart) |
MEM_STATIC unsigned | ZSTD_countLeadingZeros32 (U32 val) |
MEM_STATIC unsigned | ZSTD_countLeadingZeros32_fallback (U32 val) |
MEM_STATIC unsigned | ZSTD_countLeadingZeros64 (U64 val) |
static size_t | ZSTD_countSeqStoreLiteralsBytes (const seqStore_t *const seqStore) |
static size_t | ZSTD_countSeqStoreMatchBytes (const seqStore_t *const seqStore) |
MEM_STATIC unsigned | ZSTD_countTrailingZeros32 (U32 val) |
MEM_STATIC unsigned | ZSTD_countTrailingZeros32_fallback (U32 val) |
MEM_STATIC unsigned | ZSTD_countTrailingZeros64 (U64 val) |
static size_t | ZSTD_cParam_clampBounds (ZSTD_cParameter cParam, int *value) |
ZSTD_bounds | ZSTD_cParam_getBounds (ZSTD_cParameter param) |
MEM_STATIC int | ZSTD_cParam_withinBounds (ZSTD_cParameter cParam, int value) |
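Parameter ranges can be queried at run time with ZSTD_cParam_getBounds(); the returned ZSTD_bounds carries an error field plus the inclusive lower and upper bounds. For example:

    #include <stdio.h>
    #include "zstd.h"

    void printLevelBounds(void)
    {
        ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
        if (ZSTD_isError(b.error)) return;
        printf("compression levels: %d .. %d (default %d)\n",
               b.lowerBound, b.upperBound, ZSTD_defaultCLevel());
    }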
MEM_STATIC ZSTD_cpuid_t | ZSTD_cpuid (void) |
MEM_STATIC int | ZSTD_cpuSupportsBmi2 (void) |
ZSTD_CCtx * | ZSTD_createCCtx (void) |
ZSTD_CCtx * | ZSTD_createCCtx_advanced (ZSTD_customMem customMem) |
ZSTD_CCtx_params * | ZSTD_createCCtxParams (void) |
static ZSTD_CCtx_params * | ZSTD_createCCtxParams_advanced (ZSTD_customMem customMem) |
ZSTD_CDict * | ZSTD_createCDict (const void *dict, size_t dictSize, int compressionLevel) |
ZSTD_CDict * | ZSTD_createCDict_advanced (const void *dictBuffer, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem) |
ZSTD_CDict * | ZSTD_createCDict_advanced2 (const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, const ZSTD_CCtx_params *originalCctxParams, ZSTD_customMem customMem) |
static ZSTD_CDict * | ZSTD_createCDict_advanced_internal (size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_compressionParameters cParams, ZSTD_paramSwitch_e useRowMatchFinder, U32 enableDedicatedDictSearch, ZSTD_customMem customMem) |
ZSTD_CDict * | ZSTD_createCDict_byReference (const void *dict, size_t dictSize, int compressionLevel) |
ZSTD_CStream * | ZSTD_createCStream (void) |
ZSTD_CStream * | ZSTD_createCStream_advanced (ZSTD_customMem customMem) |
ZSTD_DCtx * | ZSTD_createDCtx (void) |
ZSTD_DCtx * | ZSTD_createDCtx_advanced (ZSTD_customMem customMem) |
static ZSTD_DCtx * | ZSTD_createDCtx_internal (ZSTD_customMem customMem) |
ZSTD_DDict * | ZSTD_createDDict (const void *dict, size_t dictSize) |
ZSTD_DDict * | ZSTD_createDDict_advanced (const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem) |
ZSTD_DDict * | ZSTD_createDDict_byReference (const void *dictBuffer, size_t dictSize) |
static ZSTD_DDictHashSet * | ZSTD_createDDictHashSet (ZSTD_customMem customMem) |
ZSTD_DStream * | ZSTD_createDStream (void) |
ZSTD_DStream * | ZSTD_createDStream_advanced (ZSTD_customMem customMem) |
POOL_ctx * | ZSTD_createThreadPool (size_t numThreads) |
size_t | ZSTD_crossEntropyCost (short const *norm, unsigned accuracyLog, unsigned const *count, unsigned const max) |
size_t | ZSTD_CStreamInSize (void) |
size_t | ZSTD_CStreamOutSize (void) |
MEM_STATIC void * | ZSTD_customCalloc (size_t size, ZSTD_customMem customMem) |
MEM_STATIC void | ZSTD_customFree (void *ptr, ZSTD_customMem customMem) |
MEM_STATIC void * | ZSTD_customMalloc (size_t size, ZSTD_customMem customMem) |
MEM_STATIC size_t | ZSTD_cwksp_align (size_t size, size_t const align) |
MEM_STATIC size_t | ZSTD_cwksp_aligned_alloc_size (size_t size) |
MEM_STATIC size_t | ZSTD_cwksp_alloc_size (size_t size) |
MEM_STATIC void | ZSTD_cwksp_assert_internal_consistency (ZSTD_cwksp *ws) |
MEM_STATIC size_t | ZSTD_cwksp_available_space (ZSTD_cwksp *ws) |
MEM_STATIC void | ZSTD_cwksp_bump_oversized_duration (ZSTD_cwksp *ws, size_t additionalNeededSpace) |
MEM_STATIC size_t | ZSTD_cwksp_bytes_to_align_ptr (void *ptr, const size_t alignBytes) |
MEM_STATIC int | ZSTD_cwksp_check_available (ZSTD_cwksp *ws, size_t additionalNeededSpace) |
MEM_STATIC int | ZSTD_cwksp_check_too_large (ZSTD_cwksp *ws, size_t additionalNeededSpace) |
MEM_STATIC int | ZSTD_cwksp_check_wasteful (ZSTD_cwksp *ws, size_t additionalNeededSpace) |
MEM_STATIC void | ZSTD_cwksp_clean_tables (ZSTD_cwksp *ws) |
MEM_STATIC void | ZSTD_cwksp_clear (ZSTD_cwksp *ws) |
MEM_STATIC void | ZSTD_cwksp_clear_tables (ZSTD_cwksp *ws) |
MEM_STATIC size_t | ZSTD_cwksp_create (ZSTD_cwksp *ws, size_t size, ZSTD_customMem customMem) |
MEM_STATIC int | ZSTD_cwksp_estimated_space_within_bounds (const ZSTD_cwksp *const ws, size_t const estimatedSpace) |
MEM_STATIC void | ZSTD_cwksp_free (ZSTD_cwksp *ws, ZSTD_customMem customMem) |
MEM_STATIC void | ZSTD_cwksp_init (ZSTD_cwksp *ws, void *start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) |
MEM_STATIC void * | ZSTD_cwksp_initialAllocStart (ZSTD_cwksp *ws) |
MEM_STATIC size_t | ZSTD_cwksp_internal_advance_phase (ZSTD_cwksp *ws, ZSTD_cwksp_alloc_phase_e phase) |
MEM_STATIC void | ZSTD_cwksp_mark_tables_clean (ZSTD_cwksp *ws) |
MEM_STATIC void | ZSTD_cwksp_mark_tables_dirty (ZSTD_cwksp *ws) |
MEM_STATIC void | ZSTD_cwksp_move (ZSTD_cwksp *dst, ZSTD_cwksp *src) |
MEM_STATIC int | ZSTD_cwksp_owns_buffer (const ZSTD_cwksp *ws, const void *ptr) |
MEM_STATIC void * | ZSTD_cwksp_reserve_aligned (ZSTD_cwksp *ws, size_t bytes) |
MEM_STATIC void * | ZSTD_cwksp_reserve_aligned_init_once (ZSTD_cwksp *ws, size_t bytes) |
MEM_STATIC BYTE * | ZSTD_cwksp_reserve_buffer (ZSTD_cwksp *ws, size_t bytes) |
MEM_STATIC int | ZSTD_cwksp_reserve_failed (const ZSTD_cwksp *ws) |
MEM_STATIC void * | ZSTD_cwksp_reserve_internal (ZSTD_cwksp *ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) |
MEM_STATIC void * | ZSTD_cwksp_reserve_internal_buffer_space (ZSTD_cwksp *ws, size_t const bytes) |
MEM_STATIC void * | ZSTD_cwksp_reserve_object (ZSTD_cwksp *ws, size_t bytes) |
MEM_STATIC void * | ZSTD_cwksp_reserve_table (ZSTD_cwksp *ws, size_t bytes) |
MEM_STATIC size_t | ZSTD_cwksp_sizeof (const ZSTD_cwksp *ws) |
MEM_STATIC size_t | ZSTD_cwksp_slack_space_required (void) |
MEM_STATIC size_t | ZSTD_cwksp_used (const ZSTD_cwksp *ws) |
U32 | ZSTD_cycleLog (U32 hashLog, ZSTD_strategy strat) |
MEM_STATIC int | ZSTD_DCtx_get_bmi2 (const struct ZSTD_DCtx_s *dctx) |
size_t | ZSTD_DCtx_getParameter (ZSTD_DCtx *dctx, ZSTD_dParameter param, int *value) |
static int | ZSTD_DCtx_isOverflow (ZSTD_DStream *zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) |
static int | ZSTD_DCtx_isOversizedTooLong (ZSTD_DStream *zds) |
size_t | ZSTD_DCtx_loadDictionary (ZSTD_DCtx *dctx, const void *dict, size_t dictSize) |
size_t | ZSTD_DCtx_loadDictionary_advanced (ZSTD_DCtx *dctx, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) |
size_t | ZSTD_DCtx_loadDictionary_byReference (ZSTD_DCtx *dctx, const void *dict, size_t dictSize) |
size_t | ZSTD_DCtx_refDDict (ZSTD_DCtx *dctx, const ZSTD_DDict *ddict) |
size_t | ZSTD_DCtx_refPrefix (ZSTD_DCtx *dctx, const void *prefix, size_t prefixSize) |
size_t | ZSTD_DCtx_refPrefix_advanced (ZSTD_DCtx *dctx, const void *prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) |
size_t | ZSTD_DCtx_reset (ZSTD_DCtx *dctx, ZSTD_ResetDirective reset) |
static void | ZSTD_DCtx_resetParameters (ZSTD_DCtx *dctx) |
static void | ZSTD_DCtx_selectFrameDDict (ZSTD_DCtx *dctx) |
size_t | ZSTD_DCtx_setFormat (ZSTD_DCtx *dctx, ZSTD_format_e format) |
size_t | ZSTD_DCtx_setMaxWindowSize (ZSTD_DCtx *dctx, size_t maxWindowSize) |
size_t | ZSTD_DCtx_setParameter (ZSTD_DCtx *dctx, ZSTD_dParameter dParam, int value) |
static void | ZSTD_DCtx_trace_end (ZSTD_DCtx const *dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming) |
static void | ZSTD_DCtx_updateOversizedDuration (ZSTD_DStream *zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) |
const void * | ZSTD_DDict_dictContent (const ZSTD_DDict *ddict) |
size_t | ZSTD_DDict_dictSize (const ZSTD_DDict *ddict) |
static size_t | ZSTD_DDictHashSet_addDDict (ZSTD_DDictHashSet *hashSet, const ZSTD_DDict *ddict, ZSTD_customMem customMem) |
static size_t | ZSTD_DDictHashSet_emplaceDDict (ZSTD_DDictHashSet *hashSet, const ZSTD_DDict *ddict) |
static size_t | ZSTD_DDictHashSet_expand (ZSTD_DDictHashSet *hashSet, ZSTD_customMem customMem) |
static const ZSTD_DDict * | ZSTD_DDictHashSet_getDDict (ZSTD_DDictHashSet *hashSet, U32 dictID) |
static size_t | ZSTD_DDictHashSet_getIndex (const ZSTD_DDictHashSet *hashSet, U32 dictID) |
static size_t | ZSTD_decodeFrameHeader (ZSTD_DCtx *dctx, const void *src, size_t headerSize) |
size_t | ZSTD_decodeLiteralsBlock (ZSTD_DCtx *dctx, const void *src, size_t srcSize, void *dst, size_t dstCapacity, const streaming_operation streaming) |
size_t | ZSTD_decodeSeqHeaders (ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize) |
FORCE_INLINE_TEMPLATE seq_t | ZSTD_decodeSequence (seqState_t *seqState, const ZSTD_longOffset_e longOffsets) |
size_t | ZSTD_decodingBufferSize_min (unsigned long long windowSize, unsigned long long frameContentSize) |
size_t | ZSTD_decompress (void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
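One-shot decompression needs the destination size up front; when the frame header records it, ZSTD_getFrameContentSize() (declared in zstd.h) recovers it. An illustrative sketch:

    #include <stdlib.h>
    #include "zstd.h"

    void *decompressSimple(const void *cSrc, size_t cSize, size_t *dSizePtr)
    {
        unsigned long long const dBound = ZSTD_getFrameContentSize(cSrc, cSize);
        if (dBound == ZSTD_CONTENTSIZE_ERROR || dBound == ZSTD_CONTENTSIZE_UNKNOWN)
            return NULL;   /* unknown size: fall back to streaming decompression */
        {   void *dst = malloc((size_t)dBound);
            if (dst == NULL) return NULL;
            {   size_t const dSize = ZSTD_decompress(dst, (size_t)dBound, cSrc, cSize);
                if (ZSTD_isError(dSize)) { free(dst); return NULL; }
                *dSizePtr = dSize;
            }
            return dst;
        }
    }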
static size_t | ZSTD_decompress_insertDictionary (ZSTD_DCtx *dctx, const void *dict, size_t dictSize) |
size_t | ZSTD_decompress_usingDDict (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict) |
size_t | ZSTD_decompress_usingDict (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize) |
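ZSTD_DDict is the decompression-side counterpart of ZSTD_CDict. A hedged sketch (again, a real application would cache and reuse the DDict):

    #include "zstd.h"

    size_t decompressWithDDict(ZSTD_DCtx *dctx,
                               void *dst, size_t dstCap,
                               const void *cSrc, size_t cSize,
                               const void *dictBuf, size_t dictSize)
    {
        ZSTD_DDict *ddict = ZSTD_createDDict(dictBuf, dictSize);
        size_t dSize;
        if (ddict == NULL) return 0;
        dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCap, cSrc, cSize, ddict);
        ZSTD_freeDDict(ddict);
        return ZSTD_isError(dSize) ? 0 : dSize;
    }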
size_t | ZSTD_decompressBegin (ZSTD_DCtx *dctx) |
size_t | ZSTD_decompressBegin_usingDDict (ZSTD_DCtx *dctx, const ZSTD_DDict *ddict) |
size_t | ZSTD_decompressBegin_usingDict (ZSTD_DCtx *dctx, const void *dict, size_t dictSize) |
size_t | ZSTD_decompressBlock (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
size_t | ZSTD_decompressBlock_deprecated (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
size_t | ZSTD_decompressBlock_internal (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const int frame, const streaming_operation streaming) |
unsigned long long | ZSTD_decompressBound (const void *src, size_t srcSize) |
size_t | ZSTD_decompressContinue (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
static size_t | ZSTD_decompressContinueStream (ZSTD_DStream *zds, char **op, char *oend, void const *src, size_t srcSize) |
size_t | ZSTD_decompressDCtx (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
static size_t | ZSTD_decompressFrame (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr) |
size_t | ZSTD_decompressionMargin (void const *src, size_t srcSize) |
static size_t | ZSTD_decompressMultiFrame (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, const ZSTD_DDict *ddict) |
static size_t | ZSTD_decompressSequences (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE | ZSTD_decompressSequences_body (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE | ZSTD_decompressSequences_bodySplitLitBuffer (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
static size_t | ZSTD_decompressSequences_default (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
static size_t | ZSTD_decompressSequencesLong (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_decompressSequencesLong_body (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
static size_t | ZSTD_decompressSequencesLong_default (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
static size_t | ZSTD_decompressSequencesSplitLitBuffer (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
static size_t | ZSTD_decompressSequencesSplitLitBuffer_default (ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) |
size_t | ZSTD_decompressStream (ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input) |
size_t | ZSTD_decompressStream_simpleArgs (ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, size_t *dstPos, const void *src, size_t srcSize, size_t *srcPos) |
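ZSTD_decompressStream() returns 0 at the end of a frame and a nonzero size hint otherwise; errors are detected with ZSTD_isError(). A sketch with buffers sized by ZSTD_DStreamInSize()/ZSTD_DStreamOutSize():

    #include <stdio.h>
    #include "zstd.h"

    int decompressStream(ZSTD_DCtx *dctx, FILE *fin, FILE *fout,
                         void *inBuf, size_t inCap, void *outBuf, size_t outCap)
    {
        size_t nRead;
        while ((nRead = fread(inBuf, 1, inCap, fin)) != 0) {
            ZSTD_inBuffer input = { inBuf, nRead, 0 };
            while (input.pos < input.size) {   /* drain each chunk fully */
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const ret = ZSTD_decompressStream(dctx, &output, &input);
                if (ZSTD_isError(ret)) return -1;
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        return 0;
    }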
static ZSTD_compressionParameters | ZSTD_dedicatedDictSearch_getCParams (int const compressionLevel, size_t const dictSize) |
static int | ZSTD_dedicatedDictSearch_isSupported (const ZSTD_compressionParameters *cParams) |
void | ZSTD_dedicatedDictSearch_lazy_loadDictionary (ZSTD_matchState_t *ms, const BYTE *const ip) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_dedicatedDictSearch_lazy_search (size_t *offsetPtr, size_t ml, U32 nbAttempts, const ZSTD_matchState_t *const dms, const BYTE *const ip, const BYTE *const iLimit, const BYTE *const prefixStart, const U32 curr, const U32 dictLimit, const size_t ddsIdx) |
static void | ZSTD_dedicatedDictSearch_revertCParams (ZSTD_compressionParameters *cParams) |
int | ZSTD_defaultCLevel (void) |
static size_t | ZSTD_deriveBlockSplits (ZSTD_CCtx *zc, U32 partitions[], U32 nbSeq) |
static void | ZSTD_deriveBlockSplitsHelper (seqStoreSplits *splits, size_t startIdx, size_t endIdx, ZSTD_CCtx *zc, const seqStore_t *origSeqStore) |
static void | ZSTD_deriveSeqStoreChunk (seqStore_t *resultSeqStore, const seqStore_t *originalSeqStore, size_t startIdx, size_t endIdx) |
static U32 | ZSTD_dictAndWindowLog (U32 windowLog, U64 srcSize, U64 dictSize) |
static FSE_repeat | ZSTD_dictNCountRepeat (short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) |
static int | ZSTD_dictTooBig (size_t const loadedDictSize) |
static U32 | ZSTD_downscaleStats (unsigned *table, U32 lastEltIndex, U32 shift, base_directive_e base1) |
ZSTD_bounds | ZSTD_dParam_getBounds (ZSTD_dParameter dParam) |
static int | ZSTD_dParam_withinBounds (ZSTD_dParameter dParam, int value) |
size_t | ZSTD_DStreamInSize (void) |
size_t | ZSTD_DStreamOutSize (void) |
static size_t | ZSTD_DUBT_findBestMatch (ZSTD_matchState_t *ms, const BYTE *const ip, const BYTE *const iend, size_t *offBasePtr, U32 const mls, const ZSTD_dictMode_e dictMode) |
static size_t | ZSTD_DUBT_findBetterDictMatch (const ZSTD_matchState_t *ms, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, size_t bestLength, U32 nbCompares, U32 const mls, const ZSTD_dictMode_e dictMode) |
size_t | ZSTD_encodeSequences (void *dst, size_t dstCapacity, FSE_CTable const *CTable_MatchLength, BYTE const *mlCodeTable, FSE_CTable const *CTable_OffsetBits, BYTE const *ofCodeTable, FSE_CTable const *CTable_LitLength, BYTE const *llCodeTable, seqDef const *sequences, size_t nbSeq, int longOffsets, int bmi2) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_encodeSequences_body (void *dst, size_t dstCapacity, FSE_CTable const *CTable_MatchLength, BYTE const *mlCodeTable, FSE_CTable const *CTable_OffsetBits, BYTE const *ofCodeTable, FSE_CTable const *CTable_LitLength, BYTE const *llCodeTable, seqDef const *sequences, size_t nbSeq, int longOffsets) |
static size_t | ZSTD_encodeSequences_default (void *dst, size_t dstCapacity, FSE_CTable const *CTable_MatchLength, BYTE const *mlCodeTable, FSE_CTable const *CTable_OffsetBits, BYTE const *ofCodeTable, FSE_CTable const *CTable_LitLength, BYTE const *llCodeTable, seqDef const *sequences, size_t nbSeq, int longOffsets) |
size_t | ZSTD_endStream (ZSTD_CStream *zcs, ZSTD_outBuffer *output) |
MEM_STATIC size_t | ZSTD_entropyCompressSeqStore (const seqStore_t *seqStorePtr, const ZSTD_entropyCTables_t *prevEntropy, ZSTD_entropyCTables_t *nextEntropy, const ZSTD_CCtx_params *cctxParams, void *dst, size_t dstCapacity, size_t srcSize, void *entropyWorkspace, size_t entropyWkspSize, int bmi2) |
MEM_STATIC size_t | ZSTD_entropyCompressSeqStore_internal (const seqStore_t *seqStorePtr, const ZSTD_entropyCTables_t *prevEntropy, ZSTD_entropyCTables_t *nextEntropy, const ZSTD_CCtx_params *cctxParams, void *dst, size_t dstCapacity, void *entropyWorkspace, size_t entropyWkspSize, const int bmi2) |
static size_t | ZSTD_entropyCost (unsigned const *count, unsigned const max, size_t const total) |
static ZSTD_frameSizeInfo | ZSTD_errorFrameSizeInfo (size_t ret) |
static size_t | ZSTD_estimateBlockSize (const BYTE *literals, size_t litSize, const BYTE *ofCodeTable, const BYTE *llCodeTable, const BYTE *mlCodeTable, size_t nbSeq, const ZSTD_entropyCTables_t *entropy, const ZSTD_entropyCTablesMetadata_t *entropyMetadata, void *workspace, size_t wkspSize, int writeLitEntropy, int writeSeqEntropy) |
static size_t | ZSTD_estimateBlockSize_literal (const BYTE *literals, size_t litSize, const ZSTD_hufCTables_t *huf, const ZSTD_hufCTablesMetadata_t *hufMetadata, void *workspace, size_t wkspSize, int writeEntropy) |
static size_t | ZSTD_estimateBlockSize_sequences (const BYTE *ofCodeTable, const BYTE *llCodeTable, const BYTE *mlCodeTable, size_t nbSeq, const ZSTD_fseCTables_t *fseTables, const ZSTD_fseCTablesMetadata_t *fseMetadata, void *workspace, size_t wkspSize, int writeEntropy) |
static size_t | ZSTD_estimateBlockSize_symbolType (symbolEncodingType_e type, const BYTE *codeTable, size_t nbSeq, unsigned maxCode, const FSE_CTable *fseCTable, const U8 *additionalBits, short const *defaultNorm, U32 defaultNormLog, U32 defaultMax, void *workspace, size_t wkspSize) |
size_t | ZSTD_estimateCCtxSize (int compressionLevel) |
static size_t | ZSTD_estimateCCtxSize_internal (int compressionLevel) |
size_t | ZSTD_estimateCCtxSize_usingCCtxParams (const ZSTD_CCtx_params *params) |
static size_t | ZSTD_estimateCCtxSize_usingCCtxParams_internal (const ZSTD_compressionParameters *cParams, const ldmParams_t *ldmParams, const int isStatic, const ZSTD_paramSwitch_e useRowMatchFinder, const size_t buffInSize, const size_t buffOutSize, const U64 pledgedSrcSize, int useSequenceProducer, size_t maxBlockSize) |
size_t | ZSTD_estimateCCtxSize_usingCParams (ZSTD_compressionParameters cParams) |
size_t | ZSTD_estimateCDictSize (size_t dictSize, int compressionLevel) |
size_t | ZSTD_estimateCDictSize_advanced (size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod) |
size_t | ZSTD_estimateCStreamSize (int compressionLevel) |
static size_t | ZSTD_estimateCStreamSize_internal (int compressionLevel) |
size_t | ZSTD_estimateCStreamSize_usingCCtxParams (const ZSTD_CCtx_params *params) |
size_t | ZSTD_estimateCStreamSize_usingCParams (ZSTD_compressionParameters cParams) |
size_t | ZSTD_estimateDCtxSize (void) |
size_t | ZSTD_estimateDDictSize (size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) |
size_t | ZSTD_estimateDStreamSize (size_t windowSize) |
size_t | ZSTD_estimateDStreamSize_fromFrame (const void *src, size_t srcSize) |
static size_t | ZSTD_estimateSubBlockSize (const BYTE *literals, size_t litSize, const BYTE *ofCodeTable, const BYTE *llCodeTable, const BYTE *mlCodeTable, size_t nbSeq, const ZSTD_entropyCTables_t *entropy, const ZSTD_entropyCTablesMetadata_t *entropyMetadata, void *workspace, size_t wkspSize, int writeLitEntropy, int writeSeqEntropy) |
static size_t | ZSTD_estimateSubBlockSize_literal (const BYTE *literals, size_t litSize, const ZSTD_hufCTables_t *huf, const ZSTD_hufCTablesMetadata_t *hufMetadata, void *workspace, size_t wkspSize, int writeEntropy) |
static size_t | ZSTD_estimateSubBlockSize_sequences (const BYTE *ofCodeTable, const BYTE *llCodeTable, const BYTE *mlCodeTable, size_t nbSeq, const ZSTD_fseCTables_t *fseTables, const ZSTD_fseCTablesMetadata_t *fseMetadata, void *workspace, size_t wkspSize, int writeEntropy) |
static size_t | ZSTD_estimateSubBlockSize_symbolType (symbolEncodingType_e type, const BYTE *codeTable, unsigned maxCode, size_t nbSeq, const FSE_CTable *fseCTable, const U8 *additionalBits, short const *defaultNorm, U32 defaultNormLog, U32 defaultMax, void *workspace, size_t wkspSize) |
HINT_INLINE size_t | ZSTD_execSequence (BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd) |
FORCE_NOINLINE size_t | ZSTD_execSequenceEnd (BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd) |
FORCE_NOINLINE size_t | ZSTD_execSequenceEndSplitLitBuffer (BYTE *op, BYTE *const oend, const BYTE *const oend_w, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd) |
HINT_INLINE size_t | ZSTD_execSequenceSplitLitBuffer (BYTE *op, BYTE *const oend, const BYTE *const oend_w, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const prefixStart, const BYTE *const virtualStart, const BYTE *const dictEnd) |
static size_t | ZSTD_fastSequenceLengthSum (ZSTD_Sequence const *seqBuf, size_t seqBufSize) |
void | ZSTD_fillDoubleHashTable (ZSTD_matchState_t *ms, void const *end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) |
static void | ZSTD_fillDoubleHashTableForCCtx (ZSTD_matchState_t *ms, void const *end, ZSTD_dictTableLoadMethod_e dtlm) |
static void | ZSTD_fillDoubleHashTableForCDict (ZSTD_matchState_t *ms, void const *end, ZSTD_dictTableLoadMethod_e dtlm) |
void | ZSTD_fillHashTable (ZSTD_matchState_t *ms, void const *end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) |
static void | ZSTD_fillHashTableForCCtx (ZSTD_matchState_t *ms, const void *const end, ZSTD_dictTableLoadMethod_e dtlm) |
static void | ZSTD_fillHashTableForCDict (ZSTD_matchState_t *ms, const void *const end, ZSTD_dictTableLoadMethod_e dtlm) |
static U32 | ZSTD_finalizeOffBase (U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) |
unsigned long long | ZSTD_findDecompressedSize (const void *src, size_t srcSize) |
size_t | ZSTD_findFrameCompressedSize (const void *src, size_t srcSize) |
static ZSTD_frameSizeInfo | ZSTD_findFrameSizeInfo (const void *src, size_t srcSize) |
size_t | ZSTD_flushStream (ZSTD_CStream *zcs, ZSTD_outBuffer *output) |
MEM_STATIC U32 | ZSTD_fracWeight (U32 rawStat) |
size_t | ZSTD_frameHeaderSize (const void *src, size_t srcSize) |
static size_t | ZSTD_frameHeaderSize_internal (const void *src, size_t srcSize, ZSTD_format_e format) |
size_t | ZSTD_freeCCtx (ZSTD_CCtx *cctx) |
static void | ZSTD_freeCCtxContent (ZSTD_CCtx *cctx) |
size_t | ZSTD_freeCCtxParams (ZSTD_CCtx_params *params) |
size_t | ZSTD_freeCDict (ZSTD_CDict *cdict) |
size_t | ZSTD_freeCStream (ZSTD_CStream *zcs) |
size_t | ZSTD_freeDCtx (ZSTD_DCtx *dctx) |
size_t | ZSTD_freeDDict (ZSTD_DDict *ddict) |
static void | ZSTD_freeDDictHashSet (ZSTD_DDictHashSet *hashSet, ZSTD_customMem customMem) |
size_t | ZSTD_freeDStream (ZSTD_DStream *zds) |
void | ZSTD_freeThreadPool (ZSTD_threadPool *pool) |
size_t | ZSTD_fseBitCost (FSE_CTable const *ctable, unsigned const *count, unsigned const max) |
size_t | ZSTD_generateSequences (ZSTD_CCtx *zc, ZSTD_Sequence *outSeqs, size_t outSeqsSize, const void *src, size_t srcSize) |
size_t | ZSTD_getBlockSize (const ZSTD_CCtx *cctx) |
static size_t | ZSTD_getBlockSize_deprecated (const ZSTD_CCtx *cctx) |
size_t | ZSTD_getcBlockSize (const void *src, size_t srcSize, blockProperties_t *bpPtr) |
static ZSTD_cParamMode_e | ZSTD_getCParamMode (ZSTD_CDict const *cdict, ZSTD_CCtx_params const *params, U64 pledgedSrcSize) |
static U64 | ZSTD_getCParamRowSize (U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) |
ZSTD_compressionParameters | ZSTD_getCParams (int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) |
static ZSTD_compressionParameters | ZSTD_getCParams_internal (int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) |
ZSTD_compressionParameters | ZSTD_getCParamsFromCCtxParams (const ZSTD_CCtx_params *CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) |
ZSTD_compressionParameters | ZSTD_getCParamsFromCDict (const ZSTD_CDict *cdict) |
static ZSTD_DDict const * | ZSTD_getDDict (ZSTD_DCtx *dctx) |
unsigned long long | ZSTD_getDecompressedSize (const void *src, size_t srcSize) |
unsigned | ZSTD_getDictID_fromCDict (const ZSTD_CDict *cdict) |
unsigned | ZSTD_getDictID_fromDDict (const ZSTD_DDict *ddict) |
unsigned | ZSTD_getDictID_fromDict (const void *dict, size_t dictSize) |
unsigned | ZSTD_getDictID_fromFrame (const void *src, size_t srcSize) |
ZSTDERRORLIB_API ZSTD_ErrorCode | ZSTD_getErrorCode (size_t functionResult) |
const char * | ZSTD_getErrorName (size_t code) |
ZSTDERRORLIB_API const char * | ZSTD_getErrorString (ZSTD_ErrorCode code)
unsigned long long | ZSTD_getFrameContentSize (const void *src, size_t srcSize) |
size_t | ZSTD_getFrameHeader (ZSTD_frameHeader *zfhPtr, const void *src, size_t srcSize) |
size_t | ZSTD_getFrameHeader_advanced (ZSTD_frameHeader *zfhPtr, const void *src, size_t srcSize, ZSTD_format_e format) |
ZSTD_frameProgression | ZSTD_getFrameProgression (const ZSTD_CCtx *cctx) |
static unsigned | ZSTD_getFSEMaxSymbolValue (FSE_CTable const *ctable) |
MEM_STATIC U32 | ZSTD_getLowestMatchIndex (const ZSTD_matchState_t *ms, U32 curr, unsigned windowLog) |
MEM_STATIC U32 | ZSTD_getLowestPrefixIndex (const ZSTD_matchState_t *ms, U32 curr, unsigned windowLog) |
FORCE_INLINE_TEMPLATE U32 | ZSTD_getMatchPrice (U32 const offBase, U32 const matchLength, const optState_t *const optPtr, int const optLevel) |
static ZSTD_OffsetInfo | ZSTD_getOffsetInfo (const ZSTD_seqSymbol *offTable, int nbSeq) |
ZSTD_parameters | ZSTD_getParams (int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) |
static ZSTD_parameters | ZSTD_getParams_internal (int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) |
const seqStore_t * | ZSTD_getSeqStore (const ZSTD_CCtx *ctx) |
MEM_STATIC ZSTD_sequenceLength | ZSTD_getSequenceLength (seqStore_t const *seqStore, seqDef const *seq) |
static U32 | ZSTD_hash3 (U32 u, U32 h, U32 s) |
MEM_STATIC size_t | ZSTD_hash3Ptr (const void *ptr, U32 h) |
MEM_STATIC size_t | ZSTD_hash3PtrS (const void *ptr, U32 h, U32 s) |
static U32 | ZSTD_hash4 (U32 u, U32 h, U32 s) |
static size_t | ZSTD_hash4Ptr (const void *ptr, U32 h) |
static size_t | ZSTD_hash4PtrS (const void *ptr, U32 h, U32 s) |
static size_t | ZSTD_hash5 (U64 u, U32 h, U64 s) |
static size_t | ZSTD_hash5Ptr (const void *p, U32 h) |
static size_t | ZSTD_hash5PtrS (const void *p, U32 h, U64 s) |
static size_t | ZSTD_hash6 (U64 u, U32 h, U64 s) |
static size_t | ZSTD_hash6Ptr (const void *p, U32 h) |
static size_t | ZSTD_hash6PtrS (const void *p, U32 h, U64 s) |
static size_t | ZSTD_hash7 (U64 u, U32 h, U64 s) |
static size_t | ZSTD_hash7Ptr (const void *p, U32 h) |
static size_t | ZSTD_hash7PtrS (const void *p, U32 h, U64 s) |
static size_t | ZSTD_hash8 (U64 u, U32 h, U64 s) |
static size_t | ZSTD_hash8Ptr (const void *p, U32 h) |
static size_t | ZSTD_hash8PtrS (const void *p, U32 h, U64 s) |
MEM_STATIC FORCE_INLINE_ATTR size_t | ZSTD_hashPtr (const void *p, U32 hBits, U32 mls) |
MEM_STATIC FORCE_INLINE_ATTR size_t | ZSTD_hashPtrSalted (const void *p, U32 hBits, U32 mls, const U64 hashSalt) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_HcFindBestMatch (ZSTD_matchState_t *ms, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode) |
MEM_STATIC unsigned | ZSTD_highbit32 (U32 val) |
static int | ZSTD_indexTooCloseToMax (ZSTD_window_t w) |
static void | ZSTD_initCCtx (ZSTD_CCtx *cctx, ZSTD_customMem memManager) |
static size_t | ZSTD_initCDict_internal (ZSTD_CDict *cdict, const void *dictBuffer, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params params) |
size_t | ZSTD_initCStream (ZSTD_CStream *zcs, int compressionLevel) |
size_t | ZSTD_initCStream_advanced (ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pss) |
size_t | ZSTD_initCStream_internal (ZSTD_CStream *zcs, const void *dict, size_t dictSize, const ZSTD_CDict *cdict, const ZSTD_CCtx_params *params, unsigned long long pledgedSrcSize) |
size_t | ZSTD_initCStream_srcSize (ZSTD_CStream *zcs, int compressionLevel, unsigned long long pss) |
size_t | ZSTD_initCStream_usingCDict (ZSTD_CStream *zcs, const ZSTD_CDict *cdict) |
size_t | ZSTD_initCStream_usingCDict_advanced (ZSTD_CStream *zcs, const ZSTD_CDict *cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize) |
size_t | ZSTD_initCStream_usingDict (ZSTD_CStream *zcs, const void *dict, size_t dictSize, int compressionLevel) |
static void | ZSTD_initDCtx_internal (ZSTD_DCtx *dctx) |
static size_t | ZSTD_initDDict_internal (ZSTD_DDict *ddict, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) |
size_t | ZSTD_initDStream (ZSTD_DStream *zds) |
size_t | ZSTD_initDStream_usingDDict (ZSTD_DStream *dctx, const ZSTD_DDict *ddict) |
size_t | ZSTD_initDStream_usingDict (ZSTD_DStream *zds, const void *dict, size_t dictSize) |
static void | ZSTD_initFseState (ZSTD_fseState *DStatePtr, BIT_DStream_t *bitD, const ZSTD_seqSymbol *dt) |
static size_t | ZSTD_initLocalDict (ZSTD_CCtx *cctx) |
ZSTD_CCtx * | ZSTD_initStaticCCtx (void *workspace, size_t workspaceSize) |
const ZSTD_CDict * | ZSTD_initStaticCDict (void *workspace, size_t workspaceSize, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) |
ZSTD_CStream * | ZSTD_initStaticCStream (void *workspace, size_t workspaceSize) |
ZSTD_DCtx * | ZSTD_initStaticDCtx (void *workspace, size_t workspaceSize) |
const ZSTD_DDict * | ZSTD_initStaticDDict (void *sBuffer, size_t sBufferSize, const void *dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) |
ZSTD_DStream * | ZSTD_initStaticDStream (void *workspace, size_t workspaceSize) |
static void | ZSTD_initStats_ultra (ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], const void *src, size_t srcSize) |
U32 | ZSTD_insertAndFindFirstIndex (ZSTD_matchState_t *ms, const BYTE *ip) |
FORCE_INLINE_TEMPLATE U32 | ZSTD_insertAndFindFirstIndex_internal (ZSTD_matchState_t *ms, const ZSTD_compressionParameters *const cParams, const BYTE *ip, U32 const mls, U32 const lazySkipping) |
static U32 | ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t *ms, U32 *nextToUpdate3, const BYTE *const ip) |
size_t | ZSTD_insertBlock (ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize) |
static U32 | ZSTD_insertBt1 (const ZSTD_matchState_t *ms, const BYTE *const ip, const BYTE *const iend, U32 const target, U32 const mls, const int extDict) |
FORCE_INLINE_TEMPLATE U32 | ZSTD_insertBtAndGetAllMatches (ZSTD_match_t *matches, ZSTD_matchState_t *ms, U32 *nextToUpdate3, const BYTE *const ip, const BYTE *const iLimit, const ZSTD_dictMode_e dictMode, const U32 rep[ZSTD_REP_NUM], const U32 ll0, const U32 lengthToBeat, const U32 mls) |
static void | ZSTD_insertDUBT1 (const ZSTD_matchState_t *ms, U32 curr, const BYTE *inputEnd, U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode) |
static void | ZSTD_invalidateMatchState (ZSTD_matchState_t *ms) |
void | ZSTD_invalidateRepCodes (ZSTD_CCtx *cctx) |
static U64 | ZSTD_ipow (U64 base, U64 exponent) |
MEM_STATIC int | ZSTD_isAligned (void const *ptr, size_t align) |
unsigned | ZSTD_isError (size_t code) |
unsigned | ZSTD_isFrame (const void *buffer, size_t size) |
static int | ZSTD_isRLE (const BYTE *src, size_t length) |
static int | ZSTD_isSkipFrame (ZSTD_DCtx *dctx) |
unsigned | ZSTD_isSkippableFrame (const void *buffer, size_t size) |
static int | ZSTD_isUpdateAuthorized (ZSTD_cParameter param) |
void | ZSTD_ldm_adjustParameters (ldmParams_t *params, ZSTD_compressionParameters const *cParams) |
size_t | ZSTD_ldm_blockCompress (rawSeqStore_t *rawSeqStore, ZSTD_matchState_t *ms, seqStore_t *seqStore, U32 rep[ZSTD_REP_NUM], ZSTD_paramSwitch_e useRowMatchFinder, void const *src, size_t srcSize) |
static size_t | ZSTD_ldm_countBackwardsMatch (const BYTE *pIn, const BYTE *pAnchor, const BYTE *pMatch, const BYTE *pMatchBase) |
static size_t | ZSTD_ldm_countBackwardsMatch_2segments (const BYTE *pIn, const BYTE *pAnchor, const BYTE *pMatch, const BYTE *pMatchBase, const BYTE *pExtDictStart, const BYTE *pExtDictEnd) |
static size_t | ZSTD_ldm_fillFastTables (ZSTD_matchState_t *ms, void const *end) |
void | ZSTD_ldm_fillHashTable (ldmState_t *state, const BYTE *ip, const BYTE *iend, ldmParams_t const *params) |
static size_t | ZSTD_ldm_gear_feed (ldmRollingHashState_t *state, BYTE const *data, size_t size, size_t *splits, unsigned *numSplits) |
static void | ZSTD_ldm_gear_init (ldmRollingHashState_t *state, ldmParams_t const *params) |
static void | ZSTD_ldm_gear_reset (ldmRollingHashState_t *state, BYTE const *data, size_t minMatchLength) |
size_t | ZSTD_ldm_generateSequences (ldmState_t *ldms, rawSeqStore_t *sequences, ldmParams_t const *params, void const *src, size_t srcSize) |
static size_t | ZSTD_ldm_generateSequences_internal (ldmState_t *ldmState, rawSeqStore_t *rawSeqStore, ldmParams_t const *params, void const *src, size_t srcSize) |
static ldmEntry_t * | ZSTD_ldm_getBucket (ldmState_t *ldmState, size_t hash, ldmParams_t const ldmParams) |
size_t | ZSTD_ldm_getMaxNbSeq (ldmParams_t params, size_t maxChunkSize) |
size_t | ZSTD_ldm_getTableSize (ldmParams_t params) |
static void | ZSTD_ldm_insertEntry (ldmState_t *ldmState, size_t const hash, const ldmEntry_t entry, ldmParams_t const ldmParams) |
static void | ZSTD_ldm_limitTableUpdate (ZSTD_matchState_t *ms, const BYTE *anchor) |
static void | ZSTD_ldm_reduceTable (ldmEntry_t *const table, U32 const size, U32 const reducerValue) |
void | ZSTD_ldm_skipRawSeqStoreBytes (rawSeqStore_t *rawSeqStore, size_t nbBytes) |
void | ZSTD_ldm_skipSequences (rawSeqStore_t *rawSeqStore, size_t srcSize, U32 const minMatch) |
MEM_STATIC size_t | ZSTD_limitCopy (void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
MEM_STATIC int | ZSTD_literalsCompressionIsDisabled (const ZSTD_CCtx_params *cctxParams) |
static U32 | ZSTD_litLengthPrice (U32 const litLength, const optState_t *const optPtr, int optLevel) |
MEM_STATIC U32 | ZSTD_LLcode (U32 litLength) |
size_t | ZSTD_loadCEntropy (ZSTD_compressedBlockState_t *bs, void *workspace, const void *const dict, size_t dictSize) |
size_t | ZSTD_loadDEntropy (ZSTD_entropyDTables_t *entropy, const void *const dict, size_t const dictSize) |
static size_t | ZSTD_loadDictionaryContent (ZSTD_matchState_t *ms, ldmState_t *ls, ZSTD_cwksp *ws, ZSTD_CCtx_params const *params, const void *src, size_t srcSize, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) |
static size_t | ZSTD_loadEntropy_intoDDict (ZSTD_DDict *ddict, ZSTD_dictContentType_e dictContentType) |
static size_t | ZSTD_loadZstdDictionary (ZSTD_compressedBlockState_t *bs, ZSTD_matchState_t *ms, ZSTD_cwksp *ws, ZSTD_CCtx_params const *params, const void *dict, size_t dictSize, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp, void *workspace) |
static ZSTD_CCtx_params | ZSTD_makeCCtxParamsFromCParams (ZSTD_compressionParameters cParams) |
MEM_STATIC ZSTD_dictMode_e | ZSTD_matchState_dictMode (const ZSTD_matchState_t *ms) |
int | ZSTD_maxCLevel (void) |
static size_t | ZSTD_maxNbSeq (size_t blockSize, unsigned minMatch, int useSequenceProducer) |
static size_t | ZSTD_maxShortOffset (void) |
static int | ZSTD_maybeRLE (seqStore_t const *seqStore) |
size_t | ZSTD_mergeBlockDelimiters (ZSTD_Sequence *sequences, size_t seqsSize) |
int | ZSTD_minCLevel (void) |
MEM_STATIC size_t | ZSTD_minGain (size_t srcSize, ZSTD_strategy strat) |
static size_t | ZSTD_minLiteralsToCompress (ZSTD_strategy strategy, HUF_repeat huf_repeat) |
MEM_STATIC U32 | ZSTD_MLcode (U32 mlBase) |
MEM_STATIC unsigned | ZSTD_NbCommonBytes (size_t val) |
static size_t | ZSTD_NCountCost (unsigned const *count, unsigned const max, size_t const nbSeq, unsigned const FSELog) |
static int | ZSTD_needSequenceEntropyTables (ZSTD_fseCTablesMetadata_t const *fseMetadata) |
MEM_STATIC repcodes_t | ZSTD_newRep (U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) |
static size_t | ZSTD_nextInputSizeHint (const ZSTD_CCtx *cctx) |
static size_t | ZSTD_nextInputSizeHint_MTorST (const ZSTD_CCtx *cctx) |
ZSTD_nextInputType_e | ZSTD_nextInputType (ZSTD_DCtx *dctx) |
size_t | ZSTD_nextSrcSizeToDecompress (ZSTD_DCtx *dctx) |
static size_t | ZSTD_nextSrcSizeToDecompressWithInputSize (ZSTD_DCtx *dctx, size_t inputSize) |
MEM_STATIC size_t | ZSTD_noCompressBlock (void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastBlock) |
size_t | ZSTD_noCompressLiterals (void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
static void | ZSTD_opt_getNextMatchAndUpdateSeqStore (ZSTD_optLdm_t *optLdm, U32 currPosInBlock, U32 blockBytesRemaining) |
static void | ZSTD_optLdm_maybeAddMatch (ZSTD_match_t *matches, U32 *nbMatches, const ZSTD_optLdm_t *optLdm, U32 currPosInBlock) |
static void | ZSTD_optLdm_processMatchCandidate (ZSTD_optLdm_t *optLdm, ZSTD_match_t *matches, U32 *nbMatches, U32 currPosInBlock, U32 remainingBytes) |
static void | ZSTD_optLdm_skipRawSeqStoreBytes (rawSeqStore_t *rawSeqStore, size_t nbBytes) |
static void | ZSTD_overflowCorrectIfNeeded (ZSTD_matchState_t *ms, ZSTD_cwksp *ws, ZSTD_CCtx_params const *params, void const *ip, void const *iend) |
HINT_INLINE void | ZSTD_overlapCopy8 (BYTE **op, BYTE const **ip, size_t offset) |
static void | ZSTD_overrideCParams (ZSTD_compressionParameters *cParams, const ZSTD_compressionParameters *overrides) |
static size_t | ZSTD_postProcessSequenceProducerResult (ZSTD_Sequence *outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_prefetchMatch (size_t prefetchPos, seq_t const sequence, const BYTE *const prefixStart, const BYTE *const dictEnd) |
void | ZSTD_preserveUnsortedMark (U32 *const table, U32 const size, U32 const reducerValue) |
static U32 | ZSTD_rawLiteralsCost (const BYTE *const literals, U32 const litLength, const optState_t *const optPtr, int optLevel) |
MEM_STATIC U32 | ZSTD_readMINMATCH (const void *memPtr, U32 length) |
size_t | ZSTD_readSkippableFrame (void *dst, size_t dstCapacity, unsigned *magicVariant, const void *src, size_t srcSize) |
static void | ZSTD_reduceIndex (ZSTD_matchState_t *ms, ZSTD_CCtx_params const *params, const U32 reducerValue) |
static void | ZSTD_reduceTable (U32 *const table, U32 const size, U32 const reducerValue) |
static void | ZSTD_reduceTable_btlazy2 (U32 *const table, U32 const size, U32 const reducerValue) |
FORCE_INLINE_TEMPLATE void | ZSTD_reduceTable_internal (U32 *const table, U32 const size, U32 const reducerValue, int const preserveMark) |
static size_t | ZSTD_refDictContent (ZSTD_DCtx *dctx, const void *dict, size_t dictSize) |
size_t | ZSTD_referenceExternalSequences (ZSTD_CCtx *cctx, rawSeq *seq, size_t nbSeq) |
void | ZSTD_registerSequenceProducer (ZSTD_CCtx *zc, void *mState, ZSTD_sequenceProducer_F *mFinder) |
static void | ZSTD_rescaleFreqs (optState_t *const optPtr, const BYTE *const src, size_t const srcSize, int const optLevel) |
void | ZSTD_reset_compressedBlockState (ZSTD_compressedBlockState_t *bs) |
static size_t | ZSTD_reset_matchState (ZSTD_matchState_t *ms, ZSTD_cwksp *ws, const ZSTD_compressionParameters *cParams, const ZSTD_paramSwitch_e useRowMatchFinder, const ZSTD_compResetPolicy_e crp, const ZSTD_indexResetPolicy_e forceResetIndex, const ZSTD_resetTarget_e forWho) |
static size_t | ZSTD_resetCCtx_byAttachingCDict (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) |
static size_t | ZSTD_resetCCtx_byCopyingCDict (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) |
static size_t | ZSTD_resetCCtx_internal (ZSTD_CCtx *zc, ZSTD_CCtx_params const *params, U64 const pledgedSrcSize, size_t const loadedDictSize, ZSTD_compResetPolicy_e const crp, ZSTD_buffered_policy_e const zbuff) |
static size_t | ZSTD_resetCCtx_usingCDict (ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, const ZSTD_CCtx_params *params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) |
size_t | ZSTD_resetCStream (ZSTD_CStream *zcs, unsigned long long pss) |
size_t | ZSTD_resetDStream (ZSTD_DStream *dctx) |
void | ZSTD_resetSeqStore (seqStore_t *ssPtr) |
static ZSTD_paramSwitch_e | ZSTD_resolveBlockSplitterMode (ZSTD_paramSwitch_e mode, const ZSTD_compressionParameters *const cParams) |
static ZSTD_paramSwitch_e | ZSTD_resolveEnableLdm (ZSTD_paramSwitch_e mode, const ZSTD_compressionParameters *const cParams) |
static ZSTD_paramSwitch_e | ZSTD_resolveExternalRepcodeSearch (ZSTD_paramSwitch_e value, int cLevel) |
static int | ZSTD_resolveExternalSequenceValidation (int mode) |
static size_t | ZSTD_resolveMaxBlockSize (size_t maxBlockSize) |
static U32 | ZSTD_resolveRepcodeToRawOffset (const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0) |
static ZSTD_paramSwitch_e | ZSTD_resolveRowMatchFinderMode (ZSTD_paramSwitch_e mode, const ZSTD_compressionParameters *const cParams) |
MEM_STATIC size_t | ZSTD_rleCompressBlock (void *dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) |
static U64 | ZSTD_rollingHash_append (U64 hash, void const *buf, size_t size) |
MEM_STATIC U64 | ZSTD_rollingHash_compute (void const *buf, size_t size) |
MEM_STATIC U64 | ZSTD_rollingHash_primePower (U32 length) |
MEM_STATIC U64 | ZSTD_rollingHash_rotate (U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower) |
MEM_STATIC U16 | ZSTD_rotateRight_U16 (U16 const value, U32 count) |
MEM_STATIC U32 | ZSTD_rotateRight_U32 (U32 const value, U32 count) |
MEM_STATIC U64 | ZSTD_rotateRight_U64 (U64 const value, U32 count) |
FORCE_INLINE_TEMPLATE void | ZSTD_row_fillHashCache (ZSTD_matchState_t *ms, const BYTE *base, U32 const rowLog, U32 const mls, U32 idx, const BYTE *const iLimit) |
FORCE_INLINE_TEMPLATE ZSTD_VecMask | ZSTD_row_getMatchMask (const BYTE *const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries) |
FORCE_INLINE_TEMPLATE U32 | ZSTD_row_matchMaskGroupWidth (const U32 rowEntries) |
FORCE_INLINE_TEMPLATE U32 | ZSTD_row_nextCachedHash (U32 *cache, U32 const *hashTable, BYTE const *tagTable, BYTE const *base, U32 idx, U32 const hashLog, U32 const rowLog, U32 const mls, U64 const hashSalt) |
FORCE_INLINE_TEMPLATE U32 | ZSTD_row_nextIndex (BYTE *const tagRow, U32 const rowMask) |
FORCE_INLINE_TEMPLATE void | ZSTD_row_prefetch (U32 const *hashTable, BYTE const *tagTable, U32 const relRow, U32 const rowLog) |
void | ZSTD_row_update (ZSTD_matchState_t *const ms, const BYTE *ip) |
FORCE_INLINE_TEMPLATE void | ZSTD_row_update_internal (ZSTD_matchState_t *ms, const BYTE *ip, U32 const mls, U32 const rowLog, U32 const rowMask, U32 const useCache) |
FORCE_INLINE_TEMPLATE void | ZSTD_row_update_internalImpl (ZSTD_matchState_t *ms, U32 updateStartIdx, U32 const updateEndIdx, U32 const mls, U32 const rowLog, U32 const rowMask, U32 const useCache) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_RowFindBestMatch (ZSTD_matchState_t *ms, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode, const U32 rowLog) |
static int | ZSTD_rowMatchFinderSupported (const ZSTD_strategy strategy) |
static int | ZSTD_rowMatchFinderUsed (const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) |
static void | ZSTD_safecopy (BYTE *op, const BYTE *const oend_w, BYTE const *ip, ptrdiff_t length, ZSTD_overlap_e ovtype) |
static void | ZSTD_safecopyDstBeforeSrc (BYTE *op, BYTE const *ip, ptrdiff_t length) |
static void | ZSTD_safecopyLiterals (BYTE *op, BYTE const *ip, BYTE const *const iend, BYTE const *ilimit_w) |
static U32 | ZSTD_scaleStats (unsigned *table, U32 lastEltIndex, U32 logTarget) |
FORCE_INLINE_TEMPLATE size_t | ZSTD_searchMax (ZSTD_matchState_t *ms, const BYTE *ip, const BYTE *iend, size_t *offsetPtr, U32 const mls, U32 const rowLog, searchMethod_e const searchMethod, ZSTD_dictMode_e const dictMode) |
ZSTD_blockCompressor | ZSTD_selectBlockCompressor (ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode) |
static ZSTD_getAllMatchesFn | ZSTD_selectBtGetAllMatches (ZSTD_matchState_t const *ms, ZSTD_dictMode_e const dictMode) |
symbolEncodingType_e | ZSTD_selectEncodingType (FSE_repeat *repeatMode, unsigned const *count, unsigned const max, size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, FSE_CTable const *prevCTable, short const *defaultNorm, U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed, ZSTD_strategy const strategy) |
static ZSTD_sequenceCopier | ZSTD_selectSequenceCopier (ZSTD_sequenceFormat_e mode) |
static size_t | ZSTD_seqDecompressedSize (seqStore_t const *seqStore, const seqDef *sequences, size_t nbSeq, size_t litSize, int lastSequence) |
static void | ZSTD_seqStore_resolveOffCodes (repcodes_t *const dRepcodes, repcodes_t *const cRepcodes, const seqStore_t *const seqStore, U32 const nbSeq) |
int | ZSTD_seqToCodes (const seqStore_t *seqStorePtr) |
size_t | ZSTD_sequenceBound (size_t srcSize) |
static void | ZSTD_setBasePrices (optState_t *optPtr, int optLevel) |
static void | ZSTD_setBufferExpectations (ZSTD_CCtx *cctx, const ZSTD_outBuffer *output, const ZSTD_inBuffer *input) |
static size_t | ZSTD_setRleBlock (void *dst, size_t dstCapacity, BYTE b, size_t regenSize) |
static int | ZSTD_shouldAttachDict (const ZSTD_CDict *cdict, const ZSTD_CCtx_params *params, U64 pledgedSrcSize) |
size_t | ZSTD_sizeof_CCtx (const ZSTD_CCtx *cctx) |
size_t | ZSTD_sizeof_CDict (const ZSTD_CDict *cdict) |
size_t | ZSTD_sizeof_CStream (const ZSTD_CStream *zcs) |
size_t | ZSTD_sizeof_DCtx (const ZSTD_DCtx *dctx) |
size_t | ZSTD_sizeof_DDict (const ZSTD_DDict *ddict) |
size_t | ZSTD_sizeof_DStream (const ZSTD_DStream *dctx) |
static size_t | ZSTD_sizeof_localDict (ZSTD_localDict dict) |
static size_t | ZSTD_sizeof_matchState (const ZSTD_compressionParameters *const cParams, const ZSTD_paramSwitch_e useRowMatchFinder, const U32 enableDedicatedDictSearch, const U32 forCCtx) |
static size_t | ZSTD_sizeof_mtctx (const ZSTD_CCtx *cctx) |
static size_t | ZSTD_startingInputLength (ZSTD_format_e format) |
static void | ZSTD_storeLastLiterals (seqStore_t *seqStorePtr, const BYTE *anchor, size_t lastLLSize) |
HINT_INLINE UNUSED_ATTR void | ZSTD_storeSeq (seqStore_t *seqStorePtr, size_t litLength, const BYTE *literals, const BYTE *litLimit, U32 offBase, size_t matchLength) |
size_t | ZSTD_toFlushNow (ZSTD_CCtx *cctx) |
static size_t | ZSTD_totalHistorySize (BYTE *op, BYTE const *virtualStart) |
static U32 | ZSTD_totalLen (ZSTD_optimal_t sol) |
static void | ZSTD_updateDUBT (ZSTD_matchState_t *ms, const BYTE *ip, const BYTE *iend, U32 mls) |
FORCE_INLINE_TEMPLATE void | ZSTD_updateFseStateWithDInfo (ZSTD_fseState *DStatePtr, BIT_DStream_t *bitD, U16 nextState, U32 nbBits) |
MEM_STATIC void | ZSTD_updateRep (U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) |
static void | ZSTD_updateStats (optState_t *const optPtr, U32 litLength, const BYTE *literals, U32 offBase, U32 matchLength) |
void | ZSTD_updateTree (ZSTD_matchState_t *ms, const BYTE *ip, const BYTE *iend) |
FORCE_INLINE_TEMPLATE void | ZSTD_updateTree_internal (ZSTD_matchState_t *ms, const BYTE *const ip, const BYTE *const iend, const U32 mls, const ZSTD_dictMode_e dictMode) |
static unsigned | ZSTD_useLowProbCount (size_t const nbSeq) |
static int | ZSTD_useTargetCBlockSize (const ZSTD_CCtx_params *cctxParams) |
static size_t | ZSTD_validateSequence (U32 offCode, U32 matchLength, U32 minMatch, size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer) |
MEM_STATIC U32 | ZSTD_VecMask_next (ZSTD_VecMask val) |
unsigned | ZSTD_versionNumber (void) |
const char * | ZSTD_versionString (void) |
MEM_STATIC FORCE_INLINE_ATTR void | ZSTD_wildcopy (void *dst, const void *src, ptrdiff_t length, ZSTD_overlap_e const ovtype) |
MEM_STATIC U32 | ZSTD_window_canOverflowCorrect (ZSTD_window_t const window, U32 cycleLog, U32 maxDist, U32 loadedDictEnd, void const *src) |
MEM_STATIC void | ZSTD_window_clear (ZSTD_window_t *window) |
MEM_STATIC U32 | ZSTD_window_correctOverflow (ZSTD_window_t *window, U32 cycleLog, U32 maxDist, void const *src) |
MEM_STATIC void | ZSTD_window_enforceMaxDist (ZSTD_window_t *window, const void *blockEnd, U32 maxDist, U32 *loadedDictEndPtr, const ZSTD_matchState_t **dictMatchStatePtr) |
MEM_STATIC U32 | ZSTD_window_hasExtDict (ZSTD_window_t const window) |
MEM_STATIC void | ZSTD_window_init (ZSTD_window_t *window) |
MEM_STATIC U32 | ZSTD_window_isEmpty (ZSTD_window_t const window) |
MEM_STATIC U32 | ZSTD_window_needOverflowCorrection (ZSTD_window_t const window, U32 cycleLog, U32 maxDist, U32 loadedDictEnd, void const *src, void const *srcEnd) |
MEM_STATIC U32 | ZSTD_window_update (ZSTD_window_t *window, void const *src, size_t srcSize, int forceNonContiguous) |
static size_t | ZSTD_writeEpilogue (ZSTD_CCtx *cctx, void *dst, size_t dstCapacity) |
static size_t | ZSTD_writeFrameHeader (void *dst, size_t dstCapacity, const ZSTD_CCtx_params *params, U64 pledgedSrcSize, U32 dictID) |
size_t | ZSTD_writeLastEmptyBlock (void *dst, size_t dstCapacity) |
size_t | ZSTD_writeSkippableFrame (void *dst, size_t dstCapacity, const void *src, size_t srcSize, unsigned magicVariant) |
MEM_STATIC void | ZSTD_writeTaggedIndex (U32 *const hashTable, size_t hashAndTag, U32 index) |
static size_t | ZSTDMT_CCtxParam_setNbWorkers (ZSTD_CCtx_params *params, unsigned nbWorkers) |
static void | ZSTDMT_compressionJob (void *jobDescription) |
size_t | ZSTDMT_compressStream_generic (ZSTDMT_CCtx *mtctx, ZSTD_outBuffer *output, ZSTD_inBuffer *input, ZSTD_EndDirective endOp) |
static size_t | ZSTDMT_computeOverlapSize (const ZSTD_CCtx_params *params) |
static unsigned | ZSTDMT_computeTargetJobLog (const ZSTD_CCtx_params *params) |
static ZSTDMT_bufferPool * | ZSTDMT_createBufferPool (unsigned maxNbBuffers, ZSTD_customMem cMem) |
ZSTDMT_CCtx * | ZSTDMT_createCCtx_advanced (unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool *pool) |
MEM_STATIC ZSTDMT_CCtx * | ZSTDMT_createCCtx_advanced_internal (unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool *pool) |
static ZSTDMT_CCtxPool * | ZSTDMT_createCCtxPool (int nbWorkers, ZSTD_customMem cMem) |
static size_t | ZSTDMT_createCompressionJob (ZSTDMT_CCtx *mtctx, size_t srcSize, ZSTD_EndDirective endOp) |
static ZSTDMT_jobDescription * | ZSTDMT_createJobsTable (U32 *nbJobsPtr, ZSTD_customMem cMem) |
static ZSTDMT_seqPool * | ZSTDMT_createSeqPool (unsigned nbWorkers, ZSTD_customMem cMem) |
static int | ZSTDMT_doesOverlapWindow (buffer_t buffer, ZSTD_window_t window) |
static ZSTDMT_bufferPool * | ZSTDMT_expandBufferPool (ZSTDMT_bufferPool *srcBufPool, unsigned maxNbBuffers) |
static ZSTDMT_CCtxPool * | ZSTDMT_expandCCtxPool (ZSTDMT_CCtxPool *srcPool, int nbWorkers) |
static size_t | ZSTDMT_expandJobsTable (ZSTDMT_CCtx *mtctx, U32 nbWorkers) |
static ZSTDMT_seqPool * | ZSTDMT_expandSeqPool (ZSTDMT_seqPool *pool, U32 nbWorkers) |
static size_t | ZSTDMT_flushProduced (ZSTDMT_CCtx *mtctx, ZSTD_outBuffer *output, unsigned blockToFlush, ZSTD_EndDirective end) |
static void | ZSTDMT_freeBufferPool (ZSTDMT_bufferPool *bufPool) |
size_t | ZSTDMT_freeCCtx (ZSTDMT_CCtx *mtctx) |
static void | ZSTDMT_freeCCtxPool (ZSTDMT_CCtxPool *pool) |
static void | ZSTDMT_freeJobsTable (ZSTDMT_jobDescription *jobTable, U32 nbJobs, ZSTD_customMem cMem) |
static void | ZSTDMT_freeSeqPool (ZSTDMT_seqPool *seqPool) |
static buffer_t | ZSTDMT_getBuffer (ZSTDMT_bufferPool *bufPool) |
static ZSTD_CCtx * | ZSTDMT_getCCtx (ZSTDMT_CCtxPool *cctxPool) |
ZSTD_frameProgression | ZSTDMT_getFrameProgression (ZSTDMT_CCtx *mtctx) |
static range_t | ZSTDMT_getInputDataInUse (ZSTDMT_CCtx *mtctx) |
static rawSeqStore_t | ZSTDMT_getSeq (ZSTDMT_seqPool *seqPool) |
size_t | ZSTDMT_initCStream_internal (ZSTDMT_CCtx *mtctx, const void *dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, const ZSTD_CDict *cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize) |
static int | ZSTDMT_isOverlapped (buffer_t buffer, range_t range) |
size_t | ZSTDMT_nextInputSizeHint (const ZSTDMT_CCtx *mtctx) |
static int | ZSTDMT_overlapLog (int ovlog, ZSTD_strategy strat) |
static int | ZSTDMT_overlapLog_default (ZSTD_strategy strat) |
static void | ZSTDMT_releaseAllJobResources (ZSTDMT_CCtx *mtctx) |
static void | ZSTDMT_releaseBuffer (ZSTDMT_bufferPool *bufPool, buffer_t buf) |
static void | ZSTDMT_releaseCCtx (ZSTDMT_CCtxPool *pool, ZSTD_CCtx *cctx) |
static void | ZSTDMT_releaseSeq (ZSTDMT_seqPool *seqPool, rawSeqStore_t seq) |
static size_t | ZSTDMT_resize (ZSTDMT_CCtx *mtctx, unsigned nbWorkers) |
static void | ZSTDMT_serialState_ensureFinished (serialState_t *serialState, unsigned jobID, size_t cSize) |
static void | ZSTDMT_serialState_free (serialState_t *serialState) |
static int | ZSTDMT_serialState_init (serialState_t *serialState) |
static int | ZSTDMT_serialState_reset (serialState_t *serialState, ZSTDMT_seqPool *seqPool, ZSTD_CCtx_params params, size_t jobSize, const void *dict, size_t const dictSize, ZSTD_dictContentType_e dictContentType) |
static void | ZSTDMT_serialState_update (serialState_t *serialState, ZSTD_CCtx *jobCCtx, rawSeqStore_t seqStore, range_t src, unsigned jobID) |
static void | ZSTDMT_setBufferSize (ZSTDMT_bufferPool *const bufPool, size_t const bSize) |
static void | ZSTDMT_setNbSeq (ZSTDMT_seqPool *const seqPool, size_t const nbSeq) |
static size_t | ZSTDMT_sizeof_bufferPool (ZSTDMT_bufferPool *bufPool) |
size_t | ZSTDMT_sizeof_CCtx (ZSTDMT_CCtx *mtctx) |
static size_t | ZSTDMT_sizeof_CCtxPool (ZSTDMT_CCtxPool *cctxPool) |
static size_t | ZSTDMT_sizeof_seqPool (ZSTDMT_seqPool *seqPool) |
size_t | ZSTDMT_toFlushNow (ZSTDMT_CCtx *mtctx) |
static int | ZSTDMT_tryGetInputRange (ZSTDMT_CCtx *mtctx) |
void | ZSTDMT_updateCParams_whileCompressing (ZSTDMT_CCtx *mtctx, const ZSTD_CCtx_params *cctxParams) |
static void | ZSTDMT_waitForAllJobsCompleted (ZSTDMT_CCtx *mtctx) |
static void | ZSTDMT_waitForLdmComplete (ZSTDMT_CCtx *mtctx, buffer_t buffer) |
static void | ZSTDMT_writeLastEmptyBlock (ZSTDMT_jobDescription *job) |
#define __has_attribute(x) 0 |
#define _FORCE_HAS_FORMAT_STRING(...) |
#define ADVANCED_SEQS STORED_SEQS |
#define assert(condition) ((void)0) /* disable assert (default) */ |
#define B(name, bit) X(name, f7b, bit) |
#define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) |
#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2") |
#define BOUNDCHECK(cParam, val) |
#define BUCKET_A_SIZE (ALPHABET_SIZE) |
#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)]) |
#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE) |
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)]) |
#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3) |
#define CHECK_DBOUNDS(p, v) |
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e |
#define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) |
#define CLAMP_TYPE(cParam, val, type) |
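A sketch of how the CHECK_* pattern reads in practice; the helper below is hypothetical, while ZSTD_compressBegin() and ZSTD_compressEnd() are real APIs. Each CHECK_* expands to an early return whenever ERR_isError() flags the result.

/* Hypothetical helper: demonstrates CHECK_F / CHECK_V_F early-return flow. */
static size_t compressOneShotSketch(ZSTD_CCtx* cctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    CHECK_F(ZSTD_compressBegin(cctx, 3));   /* returns the error code on failure */
    CHECK_V_F(cSize, ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize));
    return cSize;                           /* cSize was declared by CHECK_V_F */
}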
#define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */ |
#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2)) |
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) |
#define D(name, bit) X(name, f1d, bit) |
#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT |
#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1]) |
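The (void)sizeof(char[(c) ? 1 : -1]) trick turns a false condition into an ill-formed negative-size array type, so the assertion is checked entirely at compile time; for example:

static void staticAssertSketch(void)
{
    DEBUG_STATIC_ASSERT(sizeof(U32) == 4);   /* OK: evaluates sizeof(char[1]) */
    /* DEBUG_STATIC_ASSERT(sizeof(U32) == 8); would not compile: char[-1] */
}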
#define DISPLAY(...) |
#define DISPLAY(...) |
#define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } |
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) |
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) |
#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ |
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) |
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) |
#define DISPLAYUPDATE(l, ...) |
#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE) |
#define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ |
#define ERROR(name) ZSTD_ERROR(name) |
#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) |
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR |
#define FORWARD_IF_ERROR(err, ...) |
If the provided expression evaluates to an error code, returns that error code.
In debug modes, prints additional information.
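A minimal usage sketch; the wrapper function is hypothetical, while ZSTD_compressBegin() is a real API. The macro evaluates the expression and bubbles any error code up to the caller.

/* Hypothetical wrapper: forward a nested error instead of handling it here. */
static size_t initCtxSketch(ZSTD_CCtx* cctx, int level)
{
    FORWARD_IF_ERROR(ZSTD_compressBegin(cctx, level), "compressBegin failed");
    return 0;   /* reached only when no error occurred */
}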
#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */) |
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)) |
#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8) |
#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned)) |
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ |
#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable)) |
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2)) |
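A worked example of the bound macros above, assuming a 64-bit build and FSE_NCOUNTBOUND = 512 (its value in fse.h):

/* FSE_BLOCKBOUND(4096)    = 4096 + (4096>>7) + 4 + sizeof(size_t)
 *                         = 4096 + 32 + 4 + 8 = 4140 bytes
 * FSE_COMPRESSBOUND(4096) = FSE_NCOUNTBOUND + FSE_BLOCKBOUND(4096)
 *                         = 512 + 4140 = 4652 bytes
 */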
#define FSE_DECODE_TYPE FSE_decode_t |
#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned)) |
#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) |
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2) |
#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable)) |
#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog))) |
#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s)) |
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) |
#define FSE_H_FSE_STATIC_LINKING_ONLY |
(Note : these functions only decompress FSE-compressed blocks. If the block is uncompressed, use memcpy() instead. If the block is a single repeated byte, use memset() instead.)
The first step is to obtain the normalized frequencies of symbols. This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). 'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. In practice, that means it's necessary to know 'maxSymbolValue' beforehand, or size the table to handle worst case situations (typically 256). FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. If there is an error, the function will return an error code, which can be tested using FSE_isError().
The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. This is performed by the function FSE_buildDTable(). The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). If there is an error, the function will return an error code, which can be tested using FSE_isError().
FSE_DTable can then be used to decompress cSrc, with FSE_decompress_usingDTable(). cSrcSize must be strictly correct, otherwise decompression will fail. The result of FSE_decompress_usingDTable() tells how many bytes were regenerated (<= dstCapacity). If there is an error, the function will return an error code, which can be tested using FSE_isError() (ex: dst buffer too small).
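The documented flow, condensed into a sketch; all calls are the public FSE API, with allocation checks abridged:

/* Sketch of the three-step FSE decompression flow described above. */
static size_t fseDecompressSketch(void* dst, size_t dstCapacity,
                                  const void* cSrc, size_t cSrcSize)
{
    short counts[256];                       /* enough for maxSymbolValue <= 255 */
    unsigned maxSymbolValue = 255, tableLog;
    size_t const hSize = FSE_readNCount(counts, &maxSymbolValue, &tableLog,
                                        cSrc, cSrcSize);
    if (FSE_isError(hSize)) return hSize;
    {   FSE_DTable* const dt = FSE_createDTable(tableLog);  /* NULL check omitted */
        size_t result = FSE_buildDTable(dt, counts, maxSymbolValue, tableLog);
        if (!FSE_isError(result))
            result = FSE_decompress_usingDTable(dst, dstCapacity,
                                                (const char*)cSrc + hSize,
                                                cSrcSize - hSize, dt);
        FSE_freeDTable(dt);
        return result;
    }
}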
#define FSE_isError ERR_isError |
#define FSE_isError ERR_isError |
#define FSE_isError ERR_isError |
#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE |
#define FSE_MAX_MEMORY_USAGE 14 |
MEMORY_USAGE : memory usage formula : N -> 2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.). Increasing memory usage improves compression ratio. Reduced memory usage can improve speed, due to cache effects. The recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache. (See the worked example after the FSE_MAX_* entries below.)
#define FSE_MAX_SYMBOL_VALUE 255 |
#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) |
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG) |
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1) |
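With FSE_MAX_MEMORY_USAGE = 14, the defaults above resolve as follows (a worked example, not extra API):

/* FSE_MAX_MEMORY_USAGE  = 14  -> 2^14 = 16 KB budget
 * FSE_MAX_TABLELOG      = 14 - 2 = 12
 * FSE_MAX_TABLESIZE     = 1 << 12 = 4096 cells
 * FSE_MAXTABLESIZE_MASK = 4096 - 1 = 4095
 */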
#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ |
#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ |
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ |
#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ |
#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3) |
#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) |
#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) |
#define GEAR_ITER_ONCE() |
#define GEAR_ITER_ONCE() |
#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) |
#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) |
#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) |
#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) |
#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) |
#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) |
#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) |
#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) |
#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a))) |
#define HBUFFSIZE 256 /* should prove large enough for all entropy headers */ |
#define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR |
HINT_INLINE is used to help the compiler generate better code. It is not used for "templates", so it can be tweaked based on the compiler's performance.
gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the always_inline attribute.
clang up to 5.0.0 (trunk) benefit tremendously from the always_inline attribute.
#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned)) |
#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1) |
#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) |
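HUF_ALIGN rounds x up to the next multiple of a (a must be a power of two); for instance:

/* HUF_ALIGN(13, 8) -> HUF_ALIGN_MASK(13, 7) -> (13 + 7) & ~7 = 16 */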
#define HUF_ASM_DECL HUF_EXTERN_C |
#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) |
HUF_CStream_t: Huffman uses its own BIT_CStream_t implementation. There are three major differences from BIT_CStream_t:
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ |
#define HUF_BLOCKSIZE_MAX (128 * 1024) |
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ |
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */ |
#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } |
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } |
#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t)) |
#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */ |
#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) |
#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192) |
HUF_buildCTable_wksp() : Same as HUF_buildCTable(), but using an externally allocated scratch buffer. workSpace must be aligned on 4-byte boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
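A usage sketch under the constraints just stated, assuming the huf.h signature HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, workSpace, wkspSize); HIST_count() is the real histogram helper, and error checks are omitted:

/* Sketch: build a Huffman CTable with a caller-provided, aligned workspace. */
static size_t buildCTableSketch(const void* src, size_t srcSize)
{
    unsigned counts[HUF_SYMBOLVALUE_MAX + 1];
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
    U32 wksp[HUF_CTABLE_WORKSPACE_SIZE_U32];   /* U32 array => 4-byte aligned */
    HIST_count(counts, &maxSymbolValue, src, srcSize);
    return HUF_buildCTable_wksp(ctable, counts, maxSymbolValue,
                                HUF_TABLELOG_DEFAULT, wksp, sizeof(wksp));
}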
#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog) |
#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) |
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) |
#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) |
#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) |
#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) |
The minimum workspace size for the workSpace used in HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp(). The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15. Buffer overflow errors may occur if code modifications result in a required workspace size greater than the size specified by this macro.
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) |
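A sketch of sizing and passing this workspace, assuming HUF_readDTableX1_wksp() keeps the (DTable, src, srcSize, workSpace, wkspSize) shape referenced above:

/* Sketch: read a Huffman decoding table using a stack workspace. */
static size_t readDTableSketch(const void* src, size_t srcSize)
{
    HUF_CREATE_STATIC_DTABLEX1(dtable, HUF_TABLELOG_MAX);
    U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_readDTableX1_wksp(dtable, src, srcSize, wksp, sizeof(wksp));
}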
#define HUF_DGEN(fn) |
#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) |
#define HUF_isError ERR_isError |
#define HUF_isError ERR_isError |
#define HUF_isError ERR_isError |
#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra |
#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned)) |
#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1) |
HUF_readStats_wksp() : Same as HUF_readStats() but takes an external workspace which must be 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE. If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ |
#define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ |
#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ |
#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ |
#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) |
#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) |
#define JOB_ERROR(e) |
#define LLIMIT 64 /* heuristic determined experimentally */ |
#define LOCALDISPLAYLEVEL(displayLevel, l, ...) |
#define LOCALDISPLAYLEVEL(displayLevel, l, ...) |
#define LOCALDISPLAYUPDATE(displayLevel, l, ...) |
#define LOCALDISPLAYUPDATE(displayLevel, l, ...) |
#define LONG_OFFSETS_MAX_EXTRA_BITS_32 |
#define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ |
#define MERGE_CHECK(a, b, c) |
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */ |
#define MINMATCHLENGTH 7 /* heuristic determined experimentally */ |
#define MINRATIO 4 /* minimum nb of occurrences to be selected in dictionary */ |
#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM) |
#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM) |
#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM) |
#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */ |
#define OFFCODE_MAX 30 /* only applicable to first block */ |
#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM) |
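The OFFBASE_* macros above pack repcode IDs (1..ZSTD_REP_NUM) and real offsets (shifted up by ZSTD_REP_NUM) into one value space; a round-trip walkthrough with ZSTD_REP_NUM == 3:

/* Round-trip walkthrough of the offBase encoding (ZSTD_REP_NUM == 3). */
static void offBaseRoundTripSketch(void)
{
    U32 const offBase = OFFSET_TO_OFFBASE(42);         /* 42 + 3 = 45 */
    assert(OFFBASE_IS_OFFSET(offBase));                /* 45 > ZSTD_REP_NUM */
    assert(OFFBASE_TO_OFFSET(offBase) == 42);          /* shift removed */
    assert(OFFBASE_IS_REPCODE(REPCODE_TO_OFFBASE(1))); /* repcodes pass through */
}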
#define PREFETCH_AREA(p, s) |
#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) |
#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) |
#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1) |
#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2) |
#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3) |
#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */ |
#define RETURN_ERROR(err, ...) |
Unconditionally return the specified error.
In debug modes, prints additional information.
#define RETURN_ERROR_IF(cond, err, ...) |
Return the specified error if the condition evaluates to true.
In debug modes, prints additional information. In order to do that (particularly, printing the conditional that failed), this can't just wrap RETURN_ERROR().
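A typical guard built from it; the helper below is hypothetical, while dstSize_tooSmall is a real zstd error name:

/* Hypothetical helper: reject undersized outputs before doing any work. */
static size_t checkedCopySketch(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
{
    RETURN_ERROR_IF(dstCapacity < srcSize, dstSize_tooSmall,
                    "need %u bytes, have %u",
                    (unsigned)srcSize, (unsigned)dstCapacity);
    memcpy(dst, src, srcSize);
    return srcSize;
}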
#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX |
#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG) |
#define STACK_POP(_a, _b, _c, _d) |
#define STACK_POP5(_a, _b, _c, _d, _e) |
#define STACK_PUSH(_a, _b, _c, _d) |
#define STACK_PUSH5(_a, _b, _c, _d, _e) |
#define STACK_SIZE SS_SMERGE_STACKSIZE |
#define STACK_SIZE TR_STACKSIZE |
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1) |
HUF_buildCTable_wksp() : Same as HUF_buildCTable(), but using an externally allocated scratch buffer. workSpace must be aligned on 4-byte boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
#define STORED_SEQS 8 |
#define STORED_SEQS_MASK (STORED_SEQS-1) |
#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) |
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ |
#define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0) |
#define THREADING_H_938743 |
Copyright (c) 2016 Tino Reichardt. All rights reserved.
You can contact the author at:
This source code is licensed under both the BSD-style license (found in the LICENSE file in the root directory of this source tree) and the GPLv2 (found in the COPYING file in the root directory of this source tree). You may select, at your option, one of the above-listed licenses.
This file holds the threading wrappers for systems which do not support pthreads.
#define WEIGHT(stat, opt) ((opt) ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) |
#define WIN_CDECL
#define X(name, r, bit)
#define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
#define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
#define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
#define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#define XXH32_state_s XXH_IPREF(XXH32_state_s)
#define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
#define XXH3_state_s XXH_IPREF(XXH3_state_s)
#define XXH3_state_t XXH_IPREF(XXH3_state_t)
#define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
#define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
#define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#define XXH64_state_s XXH_IPREF(XXH64_state_s)
#define XXH64_state_t XXH_IPREF(XXH64_state_t)
#define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#define XXH_ERROR XXH_IPREF(XXH_ERROR)
#define XXH_errorcode XXH_IPREF(XXH_errorcode)
#define XXH_INLINE_ALL_31684351384
#define XXH_IPREF(Id) XXH_NAMESPACE ## Id
#define XXH_NO_XXH3
#define XXH_OK XXH_IPREF(XXH_OK)
#define XXH_PROCESS1
#define XXH_PROCESS4
#define XXH_PUBLIC_API static
#define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#define XXH_STATIC_LINKING_ONLY
#define XXH_VERSION_MAJOR 0
#define XXH_VERSION_MINOR 8
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
#define XXH_VERSION_RELEASE 1
#define XXHASH_H_5627135585666179 1
#define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
#define ZDICTLIB_API ZDICTLIB_VISIBLE
#define ZDICTLIB_STATIC_API ZDICTLIB_VISIBLE
#define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))
#define ZSTD_ASM_SUPPORTED 0
Only enable assembly for GNUC compatible compilers, because other platforms may not support GAS assembly syntax.
Only enable assembly for Linux / MacOS, other platforms may work, but they haven't been tested. This could likely be extended to BSD systems.
Disable assembly when MSAN is enabled, because MSAN requires 100% of code to be instrumented to work.
#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode)
#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls
Generate search functions templated on (dictMode, mls, rowLog). These functions are outlined for code size & compilation time. ZSTD_searchMax() dispatches to the correct implementation function.
TODO: The start of the search function involves loading and calculating a bunch of constants from the ZSTD_matchState_t. These computations could be done in an initialization function, and saved somewhere in the match state. Then we could pass a pointer to the saved state instead of the match state, and avoid duplicate computations.
TODO: Move the match re-winding into searchMax. This improves compression ratio, and unlocks further simplifications with the next TODO.
TODO: Try moving the repcode search into searchMax. After the re-winding and repcode search are in searchMax, there is no more logic in the match finder loop that requires knowledge about the dictMode. So we should be able to avoid force inlining it, and we can join the extDict loop with the single segment loop. It should go in searchMax instead of its own function to avoid having multiple virtual function calls per search.
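The dispatch pattern behind these generators is plain token pasting, as in ZSTD_BT_SEARCH_FN above: one macro stamps out a specialized function per parameter combination, and a switch selects the instance at runtime. A reduced, self-contained illustration (hypothetical names, not the real search bodies):

    #define GEN_SEARCH_FN(dictMode, mls)                              \
        static int search_##dictMode##_##mls(const char* ip)          \
        {                                                             \
            (void)ip;                                                 \
            return mls;  /* stand-in for a real match-finder body */  \
        }

    GEN_SEARCH_FN(noDict, 4)
    GEN_SEARCH_FN(noDict, 5)

    /* Runtime dispatch to the statically specialized instances. */
    static int searchMax(int mls, const char* ip)
    {
        switch (mls) {
            default:
            case 4: return search_noDict_4(ip);
            case 5: return search_noDict_5(ip);
        }
    }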
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
#define ZSTD_CHUNKSIZE_MAX
#define ZSTD_COMPRESS_HEAPMODE 0
COMPRESS_HEAPMODE : Select how the default compression function ZSTD_compress() allocates its context, on stack (0, default), or into heap (1). Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
#define ZSTD_div64(dividend, divisor) ((dividend) / (divisor))
#define ZSTD_DUBT_UNSORTED_MARK
#define ZSTD_ENABLE_ASM_X86_64_BMI2 0
Determines whether we should enable assembly for x86-64 with BMI2.
Enable if all of the following conditions hold:
#define ZSTD_FOR_EACH_DICT_MODE(X, ...)
#define ZSTD_FOR_EACH_MLS(X, dictMode)
#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode)
#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls)
#define ZSTD_GEN_DFAST_FN(dictMode, mls)
#define ZSTD_GEN_FAST_FN(dictMode, mls, step)
#define ZSTD_HASHLOG3_MAX 17
ZSTD_HASHLOG3_MAX : Maximum size of the hash table dedicated to find 3-bytes matches, in log format, aka 17 => 1 << 17 == 128Ki positions. This structure is only used in zstd_opt. Since allocation is centralized for all strategies, it has to be known here. The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3, so that zstd_opt.c doesn't need to know about this constant.
#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls
#define ZSTD_HEAPMODE 1
HEAPMODE : Select how default decompression function ZSTD_decompress() allocates its context, on stack (0), or into heap (1, default; requires malloc()). Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
#define ZSTD_isError ERR_isError /* for inlining */
#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)
#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
#define ZSTD_MAX_NB_BLOCK_SPLITS 196
#define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
LEGACY_SUPPORT : if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
MAXWINDOWSIZE_DEFAULT : maximum window size accepted by DStream by default. Frames requiring more memory will be rejected. It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
#define ZSTD_NO_FORWARD_PROGRESS_MAX 16
NO_FORWARD_PROGRESS_MAX : maximum allowed nb of calls to ZSTD_decompressStream() without any forward progress (defined as: no byte read from input, and no byte flushed to output) before triggering an error.
#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a))
#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a))
#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b))
#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a))
#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b))
#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a))
#define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a))
#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */
#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE
#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */
#define ZSTD_SWITCH_MLS(X, dictMode)
#define ZSTD_SWITCH_ROWLOG(dictMode, mls)
#define ZSTD_SWITCH_SEARCH_METHOD(dictMode)
#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE
#define ZSTDMT_JOBLOG_MAX (MEM_32bits() ? 29 : 30)
#define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB))
#define ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256)
typedef struct COVER_best_s COVER_best_t
COVER_best_t is used for two purposes:
All of the methods except COVER_best_init() are thread safe if zstd is compiled with multithreaded support.
typedef struct COVER_dictSelection COVER_dictSelection_t
Struct used for the dictionary selection function.
typedef struct COVER_map_pair_t_s COVER_map_pair_t
typedef struct COVER_map_s COVER_map_t
typedef struct COVER_tryParameters_data_s COVER_tryParameters_data_t
Parameters for COVER_tryParameters().
typedef ZSTD_ErrorCode ERR_enum
typedef struct FASTCOVER_tryParameters_data_s FASTCOVER_tryParameters_data_t
Parameters for FASTCOVER_tryParameters().
typedef unsigned FSE_CTable
typedef unsigned FSE_DTable
typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*)
typedef size_t (*HUF_DecompressUsingDTableFn)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable)
typedef U32 HUF_DTable
typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)]
typedef struct POOL_ctx_s POOL_ctx
typedef void (*POOL_function)(void*)
typedef struct POOL_job_s POOL_job
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]
typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]
typedef struct repcodes_s repcodes_t
typedef struct _trbudget_t trbudget_t
typedef size_t (*ZSTD_blockCompressor)(ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize)
typedef size_t (*ZSTD_decompressSequences_t)(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame)
typedef U32 (*ZSTD_getAllMatchesFn)(ZSTD_match_t*, ZSTD_matchState_t*, U32*, const BYTE*, const BYTE*, const U32 rep[ZSTD_REP_NUM], U32 const ll0, U32 const lengthToBeat)
typedef struct ZSTD_matchState_t ZSTD_matchState_t
typedef struct ZSTD_prefixDict_s ZSTD_prefixDict
typedef size_t (*ZSTD_sequenceCopier)(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch)
typedef U64 ZSTD_VecMask
typedef struct ZSTDMT_bufferPool_s ZSTDMT_bufferPool
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx
typedef ZSTDMT_bufferPool ZSTDMT_seqPool
enum base_directive_e
enum BIT_DStream_status
enum blockType_e
enum FSE_repeat
enum HIST_checkInput_e
enum HUF_flags_e
Huffman flags bitset. For all flags, 0 is the default value.
enum HUF_nbStreams_e
enum HUF_repeat
enum searchMethod_e
enum streaming_operation
enum symbolEncodingType_e
enum ZSTD_bufferMode_e
enum ZSTD_buildSeqStore_e
Controls, for this matchState reset, whether the tables need to be cleared / prepared for the coming compression (ZSTDcrp_makeClean), or whether the tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a subsequent operation will overwrite the table space anyway (e.g., copying the matchState contents in from a CDict).
Enumerators: ZSTDcrp_makeClean, ZSTDcrp_leaveDirty
enum ZSTD_cParamMode_e
enum ZSTD_cStreamStage
enum ZSTD_defaultPolicy_e
enum ZSTD_dictMode_e
enum ZSTD_dictUses_e
enum ZSTD_dStage
enum ZSTD_dStreamStage
enum ZSTD_ErrorCode
enum ZSTD_litLocation_e
enum ZSTD_longOffset_e
enum ZSTD_OptPrice_e
enum ZSTD_overlap_e
enum ZSTD_resetTarget_e
Ignore: this is an internal helper.
This is a helper function to help force C99-correctness during compilation. Under strict compilation modes, variadic macro arguments can't be empty. However, variadic function arguments can be. Using a function therefore lets us statically check that at least one (string) argument was passed, independent of the compilation flags.
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
BIT_addBits() : can add up to 31 bits into bitC. Note : does not check for register overflow!
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
BIT_addBitsFast() : works only if value is clean, meaning all high bits above nbBits are 0.
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
BIT_flushBits() : assumption : bitContainer has not overflowed. Safe version; checks for buffer overflow, and prevents it. Note : does not signal buffer overflow; overflow will be revealed later on, using BIT_closeCStream().
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
BIT_flushBitsFast() : assumption : bitContainer has not overflowed. Unsafe version; does not check buffer overflow.
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity)
BIT_initCStream() : dstCapacity must be > sizeof(size_t).
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
BIT_initDStream() : Initialize a BIT_DStream_t. bitD : a pointer to an already allocated BIT_DStream_t structure. srcSize must be the exact size of the bitStream, in bytes.
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
BIT_lookBits() : Provides next n bits from local register. local register is not modified. On 32-bits, maxNbBits==24. On 64-bits, maxNbBits==56.
MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
BIT_lookBitsFast() : unsafe version; only works if nbBits >= 1
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
BIT_readBits() : Read (consume) next n bits from local register and update. Pay attention to not read more than nbBits contained into local register.
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
BIT_readBitsFast() : unsafe version; only works if nbBits >= 1
MEM_STATIC FORCE_INLINE_ATTR BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
BIT_reloadDStream() : Refill bitD from buffer previously set in BIT_initDStream(). This function is safe: it guarantees it will not read beyond the src buffer. Returns the status of the BIT_DStream_t internal register; when status == BIT_DStream_unfinished, the internal register is filled with at least 25 or 57 bits.
MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
BIT_reloadDStreamFast() : Similar to BIT_reloadDStream(), but with two differences:
MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
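Putting these primitives together, a minimal drain loop over a bitstream might look like the following sketch (it assumes the buffer was produced by a matching BIT_CStream_t writer, and that the field widths and count are known to the reader):

    /* Sketch: reading back nbFields 5-bit fields, then verifying exact consumption.
       Fields return in reverse write order (LIFO). */
    static size_t readAllFields(const void* src, size_t srcSize, unsigned nbFields)
    {
        BIT_DStream_t bitD;
        size_t total = 0;
        unsigned n;
        size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
        if (ERR_isError(initResult)) return initResult;

        for (n = 0; n < nbFields; n++) {
            total += BIT_readBits(&bitD, 5);   /* consume one 5-bit field */
            BIT_reloadDStream(&bitD);          /* refill the local register as needed */
        }
        if (!BIT_endOfDStream(&bitD)) return (size_t)-1;   /* truncated or corrupt */
        return total;
    }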
void COVER_best_destroy(COVER_best_t* best)
Call COVER_best_wait() and then destroy the COVER_best_t.
void COVER_best_finish(COVER_best_t* best, ZDICT_cover_params_t parameters, COVER_dictSelection_t selection)
void COVER_best_init(COVER_best_t* best)
void COVER_best_start(COVER_best_t* best)
void COVER_best_wait(COVER_best_t* best)
size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, const size_t* samplesSizes, const BYTE* samples, size_t* offsets, size_t nbTrainSamples, size_t nbSamples, BYTE* const dict, size_t dictBufferCapacity)
COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers, U32 k, U32 passes)
Computes the number of epochs and the size of each epoch. We will make sure that each epoch gets at least 10 * k bytes.
The COVER algorithms divide the data up into epochs of equal size and select one segment from each epoch.
maxDictSize | The maximum allowed dictionary size. |
nbDmers | The number of dmers we are training on. |
k | The parameter k (segment size). |
passes | The target number of passes over the dmer corpus. More passes means a better dictionary. |
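A sketch of arithmetic consistent with this contract, though not necessarily the exact zstd code: target roughly maxDictSize/k segments per pass, then enforce the 10*k minimum epoch size. It assumes nbDmers > 0.

    typedef struct { unsigned num; unsigned size; } epoch_info;  /* mirrors COVER_epoch_info_t */

    static epoch_info computeEpochsSketch(unsigned maxDictSize, unsigned nbDmers,
                                          unsigned k, unsigned passes)
    {
        unsigned const minEpochSize = 10 * k;
        epoch_info e;
        e.num = maxDictSize / k / passes;     /* segments selected per pass */
        if (e.num == 0) e.num = 1;
        e.size = nbDmers / e.num;             /* equal-size epochs */
        if (e.size < minEpochSize) {          /* enforce the 10*k floor */
            e.size = minEpochSize < nbDmers ? minEpochSize : nbDmers;
            e.num = nbDmers / e.size;
        }
        return e;
    }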
Clean up a context initialized with COVER_ctx_init().
Prepare a context for dictionary building. The context is only dependent on the parameter d and can be used multiple times. Returns 0 on success or an error code on error. The context must be destroyed with COVER_ctx_destroy().
COVER_dictSelection_t COVER_dictSelectionError(size_t error)
void COVER_dictSelectionFree(COVER_dictSelection_t selection)
unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection)
Destroys a map that is inited with COVER_map_init().
Initializes a map of the given size. Returns 1 on success and 0 on failure. The map must be destroyed with COVER_map_destroy(). The map is only guaranteed to be large enough to hold size elements.
COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity, size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize)
Selects the best segment in an epoch. Segments are scored according to the function:
Let F(d) be the frequency of dmer d. Let S_i be the dmer at position i of segment S, which has length k.
Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
Once the dmer d is in the dictionary we set F(d) = 0.
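In code form, the score is a straightforward sum of frequencies over the segment's dmers; freqs[] and dmerAt() below are hypothetical stand-ins for the context's frequency table and dmer extraction, and k >= d is assumed:

    /* Sketch: scoring one candidate segment starting at `begin`. */
    static unsigned scoreSegment(const unsigned* freqs,          /* hypothetical: F(d) per dmer id */
                                 unsigned (*dmerAt)(size_t),     /* hypothetical: dmer id at a position */
                                 size_t begin, unsigned k, unsigned d)
    {
        unsigned score = 0;
        unsigned i;
        for (i = 0; i < k - d + 1; i++)
            score += freqs[dmerAt(begin + i)];   /* accumulate F(S_i) */
        return score;
    }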
Same as COVER_cmp() except ties are broken by pointer value NOTE: g_coverCtx must be set to call this function. A global is required because qsort doesn't take an opaque pointer.
size_t COVER_sum(const size_t* samplesSizes, unsigned nbSamples)
void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
int divbwt(const unsigned char* T, unsigned char* U, int* A, int n, unsigned char* num_indexes, int* indexes, int openMP)
Constructs the Burrows-Wheeler transformed string of a given string.
T | [0..n-1] The input string. |
U | [0..n-1] The output string. (can be T) |
A | [0..n-1] The temporary array. (can be NULL) |
n | The length of the given string. |
num_indexes | The length of secondary indexes array. (can be NULL) |
indexes | The secondary indexes array. (can be NULL) |
openMP | enables OpenMP optimization. |
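A usage sketch: pass NULL for the temporary array and the secondary indexes, as the parameter table allows. The return value is assumed here to be the primary index on success and negative on error, which is worth re-checking against the implementation:

    #include <stdio.h>

    static int bwtExample(void)
    {
        const unsigned char T[] = "abracadabra";
        unsigned char U[sizeof(T)];
        int const n = (int)(sizeof(T) - 1);             /* exclude the trailing NUL */
        int const r = divbwt(T, U, NULL, n, NULL, NULL, 0 /* openMP off */);
        if (r < 0) return r;                            /* assumed error convention */
        printf("primary index = %d\n", r);
        return 0;
    }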
int divsufsort(const unsigned char* T, int* SA, int n, int openMP)
ERR_STATIC ERR_enum ERR_getErrorCode(size_t code)
ERR_STATIC const char* ERR_getErrorName(size_t code)
ERR_STATIC unsigned ERR_isError(size_t code)
Clean up a context initialized with FASTCOVER_ctx_init().
Prepare a context for dictionary building. The context is only dependent on the parameter d and can be used multiple times. Returns 0 on success or an error code on error. The context must be destroyed with FASTCOVER_ctx_destroy().
Selects the best segment in an epoch. Segments are scored according to the function:
Let F(d) be the frequency of all dmers with hash value d. Let S_i be the hash value of the dmer at position i of segment S, which has length k.
Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
Once the dmer with hash value d is in the dictionary we set F(d) = 0.
MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
FSE_buildCTable(): Builds ct, which must be already allocated, using FSE_createCTable().
size_t FSE_buildCTable_rle(FSE_CTable* ct, unsigned char symbolValue)
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
Same as FSE_buildDTable(), using an externally allocated workspace produced with FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue).
size_t FSE_compress_usingCTable(void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct)
FSE_compress_usingCTable(): Compress src using ct into dst, which must be already allocated. Returns the size of compressed data (<= dstCapacity), or 0 if compressed data could not fit into dst, or an errorCode, which can be tested using FSE_isError().
FSE_decodeSymbolFast() : unsafe, only works if no symbol has a probability > 50%
FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt, const unsigned fast)
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
Same as FSE_decompress(), using an externally allocated workSpace produced with FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue). Set bmi2 to 1 if your CPU supports BMI2, or 0 if it doesn't.
FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
Let's now decompose FSE_decompress_usingDTable() into its unitary components. You will decode FSE-encoded symbols from the bitStream, and also any other bitFields you put in, in reverse order.
You will need a few variables to track your bitStream. They are :
BIT_DStream_t DStream; // Stream context FSE_DState_t DState; // State context. Multiple ones are possible FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
The first thing to do is to init the bitStream. errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
You should then retrieve your initial state(s) (in reverse flushing order if you have several ones) : errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
You can then decode your data, symbol after symbol. For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'. Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out). unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
You can retrieve any bitfield you eventually stored into the bitStream (in reverse order) Note : maximum allowed nbBits is 25, for 32-bits compatibility size_t bitField = BIT_readBits(&DStream, nbBits);
All above operations only read from local register (which size depends on size_t). Refueling the register from memory is manually performed by the reload method. endSignal = FSE_reloadDStream(&DStream);
BIT_reloadDStream() result tells if there is still some more data to read from DStream. BIT_DStream_unfinished : there is still some data left into the DStream. BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled. BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed. BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop, to properly detect the exact end of stream. After each decoded symbol, check if DStream is fully consumed using this simple test : BIT_reloadDStream(&DStream) >= BIT_DStream_completed
When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. Checking if DStream has reached its end is performed by : BIT_endOfDStream(&DStream); Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. FSE_endOfDState(&DState);
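Condensed into code, the walkthrough above becomes a loop like this sketch (single decoding state, fixed symbol count, error handling abbreviated):

    /* Sketch: decoding `dstSize` symbols with one FSE state, per the steps above. */
    static size_t fseDecodeSketch(unsigned char* dst, size_t dstSize,
                                  const void* cSrc, size_t cSrcSize,
                                  const FSE_DTable* dt)
    {
        BIT_DStream_t DStream;
        FSE_DState_t  DState;
        size_t i;

        CHECK_F( BIT_initDStream(&DStream, cSrc, cSrcSize) );
        FSE_initDState(&DState, &DStream, dt);

        for (i = 0; i < dstSize; i++) {           /* symbols decode in reverse encode order */
            dst[i] = FSE_decodeSymbol(&DState, &DStream);
            BIT_reloadDStream(&DStream);          /* refill the local register from memory */
        }
        if (!BIT_endOfDStream(&DStream) || !FSE_endOfDState(&DState))
            return (size_t)-1;                    /* stream not fully, exactly consumed */
        return dstSize;
    }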
These functions are inner components of FSE_compress_usingCTable(). They allow the creation of custom streams, mixing multiple tables and bit sources.
A key property to keep in mind is that encoding and decoding are done in reverse direction. So the first symbol you will encode is the last you will decode, like a LIFO stack.
You will need a few variables to track your CStream. They are :
FSE_CTable ct; // Provided by FSE_buildCTable() BIT_CStream_t bitStream; // bitStream tracking structure FSE_CState_t state; // State tracking structure (can have several)
The first thing to do is to init bitStream and state. size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize); FSE_initCState(&state, ct);
Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError(); You can then encode your input data, byte after byte. FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time. Remember decoding will be done in reverse direction. FSE_encodeByte(&bitStream, &state, symbol);
At any time, you can also add any bit sequence. Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders BIT_addBits(&bitStream, bitField, nbBits);
The above methods don't commit data to memory, they just store it into local register, for speed. Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). Writing data to memory is a manual operation, performed by the flushBits function. BIT_flushBits(&bitStream);
Your last FSE encoding operation shall be to flush your last state value(s). FSE_flushState(&bitStream, &state);
Finally, you must close the bitStream. The function returns the size of CStream in bytes. If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible) If there is an error, it returns an errorCode (which can be tested using FSE_isError()). size_t size = BIT_closeCStream(&bitStream);
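And the mirror image for encoding, again as a sketch; current fse.h spells the last two steps FSE_encodeSymbol() and FSE_flushCState(), which correspond to the FSE_encodeByte()/FSE_flushState() calls named in the walkthrough:

    /* Sketch: encoding one block with a single FSE state, per the steps above. */
    static size_t fseEncodeSketch(void* dst, size_t dstCapacity,
                                  const unsigned char* src, size_t srcSize,
                                  const FSE_CTable* ct)
    {
        BIT_CStream_t bitStream;
        FSE_CState_t  state;
        size_t i;

        CHECK_F( BIT_initCStream(&bitStream, dst, dstCapacity) );
        FSE_initCState(&state, ct);

        for (i = srcSize; i > 0; i--) {            /* feed backwards so data decodes forwards */
            FSE_encodeSymbol(&bitStream, &state, src[i-1]);
            BIT_flushBits(&bitStream);             /* commit the local register to memory */
        }
        FSE_flushCState(&bitStream, &state);       /* flush the final state value */
        return BIT_closeCStream(&bitStream);       /* 0 means "not compressible" */
    }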
MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
FSE_initCState2() : Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) uses the smallest state value possible, saving the cost of this symbol.
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
FSE_NCountWriteBound(): Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. Typically useful for allocation purposes.
size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount)
FSE_normalizeCount(): normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). useLowProbCount is a boolean parameter which trades off compressed size for faster header decoding. When it is set to 1, the compressed data will be slightly smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0 is a good default, since header deserialization makes a big speed difference. Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
FSE_compress() does the following:
FSE_decompress() does the following:
The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and provide normalized distribution using external method.
FSE_optimalTableLog(): dynamically downsize 'tableLog' when conditions are met. It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
Same as FSE_optimalTableLog(), which uses minus==2.
MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
size_t FSE_readNCount(short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize)
The first step is to count all symbols. FSE_count() does this job very fast. Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. 'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) FSE_count() will return the number of occurrence of the most frequent symbol. This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
The next step is to normalize the frequencies. FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. It also guarantees a minimum of 1 to any Symbol with frequency >= 1. You can use 'tableLog'==0 to mean "use default tableLog value". If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
The result of FSE_normalizeCount() will be saved into a table, called 'normalizedCounter', which is a table of signed short. 'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. The return value is tableLog if everything proceeded as expected. It is 0 if there is a single symbol within distribution. If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). 'buffer' must be already allocated. For guaranteed success, buffer size must be at least FSE_headerBound(). The result of the function is the number of bytes written into 'buffer'. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
'normalizedCounter' can then be used to create the compression table 'CTable'. The space required by 'CTable' must be already allocated, using FSE_createCTable(). You can then use FSE_buildCTable() to fill 'CTable'. If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' The function returns the size of compressed data (without header), necessarily <= dstCapacity
. If it returns '0', compressed data could not fit into 'dst'. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
FSE_readNCount(): Read compactly saved 'normalizedCounter' from 'rBuffer'.
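The whole pipeline described above, condensed into one sketch for a single block. Only functions documented in this reference are used; header serialization via FSE_writeNCount() is omitted, and allocation plus workspace sizing are simplified:

    /* Sketch: histogram -> normalize -> build CTable -> compress, per the steps above. */
    static size_t fseCompressBlockSketch(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize,
                                         FSE_CTable* ct, void* wksp, size_t wkspSize)
    {
        unsigned count[256];
        short    norm[256];
        unsigned maxSymbolValue = 255;
        unsigned tableLog;

        CHECK_F( HIST_count(count, &maxSymbolValue, src, srcSize) );
        tableLog = FSE_optimalTableLog(0 /* default */, srcSize, maxSymbolValue);
        CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue,
                                    /* useLowProbCount: */ srcSize >= 2048) );
        CHECK_F( FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog, wksp, wkspSize) );
        return FSE_compress_usingCTable(dst, dstCapacity, src, srcSize, ct);
    }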
size_t FSE_readNCount_bmi2(short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize, int bmi2)
FSE_readNCount_bmi2(): Same as FSE_readNCount(), but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
FORCE_INLINE_TEMPLATE size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize)
MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
unsigned FSE_versionNumber(void)
size_t FSE_writeNCount(void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
FSE_writeNCount(): Compactly save 'normalizedCounter' into 'buffer'.
size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
HIST_count(): Provides the precise count of each byte within a table 'count'. 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). Updates *maxSymbolValuePtr with the actual largest symbol value detected.
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
HIST_count_simple() : Same as HIST_countFast(); this function is unsafe, and will segfault if any value within src is > *maxSymbolValuePtr. It is also a bit slower for large inputs. However, it does not need any additional memory (not even on stack).
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, void* workSpace, size_t workSpaceSize)
HIST_count_wksp() : Same as HIST_count(), but using an externally provided scratch buffer. The benefit is that this function uses very little stack space. workSpace is a writable buffer which must be 4-bytes aligned; workSpaceSize must be >= HIST_WKSP_SIZE.
size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
HIST_countFast() : Same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr. This function is unsafe, and will segfault if any value within src is > *maxSymbolValuePtr.
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, void* workSpace, size_t workSpaceSize)
HIST_countFast_wksp() : Same as HIST_countFast(), but using an externally provided scratch buffer. workSpace is a writable buffer which must be 4-bytes aligned; workSpaceSize must be >= HIST_WKSP_SIZE.
unsigned HIST_isError(size_t code)
FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
HUF_addBits(): Adds the symbol stored in HUF_CElt elt to the bitstream.
elt | The element we're adding. This is a (nbBits, value) pair. See the HUF_CStream_t docs for the format. |
idx | Insert into the bitstream at this idx. |
kFast | This is a template parameter. If the bitstream is guaranteed to have at least 4 unused bits after this call it may be 1, otherwise it must be 0. HUF_addBits() is faster when fast is set. |
HUF_buildCTableFromTree(): Build the CTable given the Huffman tree in huffNode.
[out] | CTable | The output Huffman CTable. |
huffNode | The Huffman tree. | |
nonNullRank | The last and smallest node in the Huffman tree. | |
maxSymbolValue | The maximum symbol value. | |
maxNbBits | The exact maximum number of bits used in the Huffman tree. |
Constructs a HUF_DEltX2.
Constructs a HUF_DEltX2 in a U32.
unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue)
size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
HUF_compress1X_repeat() : Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. If it uses hufTable it does not modify hufTable or repeat. If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. If preferRepeat then the old table will always be used if valid. If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding
wkspSize | `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE |
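A sketch of the intended calling pattern: keep hufTable and *repeat alive across blocks so a still-valid table can be reused. The parameter values here (maxSymbolValue 255, tableLog 11) are illustrative, not requirements:

    /* Sketch: per-block call that can reuse the previous block's Huffman table. */
    static size_t hufCompressBlockSketch(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize,
                                         HUF_CElt* hufTable, HUF_repeat* repeat,
                                         void* wksp, size_t wkspSize)
    {
        /* Start the very first block with *repeat == HUF_repeat_none;
           subsequent calls may keep the old table when it is still valid. */
        return HUF_compress1X_repeat(dst, dstCapacity, src, srcSize,
                                     255 /* maxSymbolValue */, 11 /* tableLog */,
                                     wksp, wkspSize, hufTable, repeat, 0 /* flags */);
    }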
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
FORCE_INLINE_TEMPLATE size_t HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
FORCE_INLINE_TEMPLATE void HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC, const BYTE* ip, size_t srcSize, const HUF_CElt* ct, int kUnroll, int kFastFlush, int kLastFast)
size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
HUF_compress4X_repeat() : Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. If it uses hufTable it does not modify hufTable or repeat. If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. If preferRepeat then the old table will always be used if valid. If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding
wkspSize | `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE |
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
size_t HUF_compressBound(size_t size)
FORCE_INLINE_TEMPLATE U32 HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
HINT_INLINE size_t HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
FORCE_INLINE_TEMPLATE BYTE HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
FORCE_INLINE_TEMPLATE U32 HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
FORCE_INLINE_TEMPLATE size_t HUF_decompress1X1_usingDTable_internal_body(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable)
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
FORCE_INLINE_TEMPLATE size_t HUF_decompress1X2_usingDTable_internal_body(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable)
size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
FORCE_INLINE_TEMPLATE size_t HUF_decompress4X1_usingDTable_internal_body(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable)
FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable)
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at a time.
FORCE_INLINE_TEMPLATE void HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
Fills the DTable rank with all the symbols from [begin, end) that are each nbBits long.
DTableRank | The start of the rank in the DTable. |
begin | The first symbol to fill (inclusive). |
end | The last symbol to fill (exclusive). |
nbBits | Each symbol is nbBits long. |
tableLog | The table log. |
baseSeq | If level == 1 { 0 } else { the first level symbol } |
level | The level in the table. Must be 1 or 2. |
FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
HUF_flushBits() : Flushes the bits in the bit container @ index 0.
kFast | If kFast is set then we must know a-priori that the bit container will not overflow. |
const char* HUF_getErrorName(size_t code)
HUF_getNbBitsFromCTable() : Read nbBits from CTable symbolTable, for symbol symbolValue presumed <= HUF_SYMBOLVALUE_MAX. Note 1 : is not inlined, as HUF_CElt definition is private.
HUF_initCStream(): Initializes the bitstream.
HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high)
unsigned HUF_isError(size_t code)
MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1)
FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
HUF_mergeIndex1() : Merges the bit container @ index 1 into the bit container @ index 0 and zeros the bit container @ index 1.
unsigned HUF_minTableLog(unsigned symbolCardinality)
HUF_compress() does the following:
The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and regenerate 'CTable' using external methods.
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace, size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags)
size_t HUF_readCTable(HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
HUF_readCTable() : Loading a CTable saved with HUF_writeCTable().
size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags)
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags)
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize)
HUF_readStats() : Read compact Huffman tree, saved by HUF_writeCTable(). huffWeight is the destination buffer; rankStats is assumed to be a table of at least HUF_TABLELOG_MAX U32. Returns the size read from src, or an error code. Note : Needed by HUF_readCTable() and HUF_readDTableX?().
FORCE_INLINE_TEMPLATE size_t HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize)
HUF_selectDecoder() : Tells which decoder is likely to decode faster, based on a set of pre-computed metrics.
HUF_setMaxHeight(): Try to enforce @targetNbBits on the Huffman tree described in @huffNode.
It attempts to convert all nodes with nbBits > @targetNbBits to employ @targetNbBits instead. Then it adjusts the tree so that it remains a valid canonical Huffman tree.
huffNode | The Huffman tree modified in place to enforce targetNbBits. It's presumed sorted, from most frequent to rarest symbol. |
lastNonNull | The symbol with the lowest count in the Huffman tree. |
targetNbBits | The allowed number of bits, which the Huffman tree may not respect. After this function the Huffman tree will respect targetNbBits. |
HUF_sort(): Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
[out] | huffNode | Sorted symbols by decreasing count. Only members .count and .byte are filled. Must have (maxSymbolValue + 1) entries. |
[in] | count | Histogram of the symbols. |
[in] | maxSymbolValue | Maximum symbol value. |
rankPosition | This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. |
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize)
FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
MEM_STATIC unsigned MEM_32bits(void)
MEM_STATIC unsigned MEM_64bits(void)
MEM_STATIC void MEM_check(void)
MEM_STATIC unsigned MEM_isLittleEndian(void)
MEM_STATIC U16 MEM_read16(const void* memPtr)
MEM_STATIC U32 MEM_read32(const void* memPtr)
MEM_STATIC U64 MEM_read64(const void* memPtr)
MEM_STATIC U32 MEM_readBE32(const void* memPtr)
MEM_STATIC U64 MEM_readBE64(const void* memPtr)
MEM_STATIC size_t MEM_readBEST(const void* memPtr)
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
MEM_STATIC size_t MEM_readST(const void* memPtr)
MEM_STATIC U32 MEM_swap32(U32 in)
MEM_STATIC U32 MEM_swap32_fallback(U32 in)
MEM_STATIC U64 MEM_swap64(U64 in)
MEM_STATIC U64 MEM_swap64_fallback(U64 in)
MEM_STATIC size_t MEM_swapST(size_t in)
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
MEM_STATIC void MEM_write32(void* memPtr, U32 value)
MEM_STATIC void MEM_write64(void* memPtr, U64 value)
MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
POOL_add() : Add the job function(opaque) to the thread pool. ctx must be valid. Possibly blocks until there is room in the queue. Note : The function may be executed asynchronously; therefore, opaque must live until the function has been completed.
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize)
POOL_create() : Create a thread pool with at most numThreads threads. numThreads must be at least 1. The maximum number of queued jobs before blocking is queueSize.
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
void POOL_free(POOL_ctx* ctx)
POOL_free() : Free a thread pool returned by POOL_create().
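A sketch of the pool lifecycle using the calls documented here; note the payload must stay alive until the job completes, since execution is asynchronous:

    static void jobFn(void* opaque)
    {
        int* const payload = (int*)opaque;
        *payload += 1;                       /* illustrative work */
    }

    static int poolExample(void)
    {
        int payload = 0;
        POOL_ctx* const ctx = POOL_create(4 /* threads */, 8 /* queue slots */);
        if (ctx == NULL) return 1;
        POOL_add(ctx, jobFn, &payload);      /* may block until a queue slot frees */
        POOL_joinJobs(ctx);                  /* wait for all queued jobs to finish */
        POOL_free(ctx);
        return 0;
    }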
POOL_join() : Shutdown the queue, wake any sleeping threads, and join all of the threads.
void POOL_joinJobs(POOL_ctx* ctx)
POOL_joinJobs() : Waits for all queued jobs to finish executing.
int POOL_resize(POOL_ctx* ctx, size_t numThreads)
POOL_resize() : Expands or shrinks the pool's number of threads. This is more efficient than releasing + creating a new context, since it tries to preserve and reuse existing threads. numThreads must be at least 1.
size_t POOL_sizeof(const POOL_ctx* ctx)
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
POOL_tryAdd() : Add the job function(opaque) to the thread pool if a queue slot is available. Returns immediately even if not (does not block).
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
ZDICT_count() : Count the nb of common bytes between 2 pointers. Note : this function presumes end of buffer followed by noisy guard band.
size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize, const void* dictContent, size_t dictContentSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_params_t parameters)
ZDICT_finalizeDictionary(): Given a custom content as a basis for dictionary, and a set of samples, finalize dictionary by adding headers and statistics according to the zstd dictionary format.
Samples must be stored concatenated in a flat buffer samplesBuffer
, supplied with an array of sizes samplesSizes
, providing the size of each sample in order. The samples are used to construct the statistics, so they should be representative of what you will compress with this dictionary.
The compression level can be set in parameters
. You should pass the compression level you expect to use in production. The statistics for each compression level differ, so tuning the dictionary for the compression level can help quite a bit.
You can set an explicit dictionary ID in parameters
, or allow us to pick a random dictionary ID for you, but we can't guarantee no collisions.
The dstDictBuffer and the dictContent may overlap, and the content will be appended to the end of the header. If the header + the content doesn't fit in maxDictSize, the beginning of the content is truncated to make room, since it is presumed that the most profitable content is at the end of the dictionary (that being the cheapest to reference).
maxDictSize must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN).
Returns : size of dictionary stored into dstDictBuffer (<= maxDictSize), or an error code, which can be tested by ZDICT_isError(). Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0. NOTE: This function currently may fail in several edge cases.
size_t ZDICT_getDictHeaderSize | ( | const void * | dictBuffer,
size_t | dictSize | ||
) |
unsigned ZDICT_getDictID | ( | const void * | dictBuffer, |
size_t | dictSize | ||
) |
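A sketch tying ZDICT_finalizeDictionary() to the inspection helpers above (buffer size, level 3 and the helper name finalizeExample are arbitrary illustrative choices):

#include <stdio.h>
#include <string.h>
#include "zdict.h"

int finalizeExample(const void* rawContent, size_t rawSize,
                    const void* samplesBuffer, const size_t* samplesSizes,
                    unsigned nbSamples)
{
    char dict[1 << 16];  /* 64 KB output buffer, must leave room for the header */
    ZDICT_params_t params;
    memset(&params, 0, sizeof(params));
    params.compressionLevel = 3;   /* level you expect to use in production */
    params.dictID = 0;             /* 0 : let the library pick an ID */
    {   size_t const dictSize = ZDICT_finalizeDictionary(
                dict, sizeof(dict), rawContent, rawSize,
                samplesBuffer, samplesSizes, nbSamples, params);
        if (ZDICT_isError(dictSize)) {
            fprintf(stderr, "%s\n", ZDICT_getErrorName(dictSize));
            return 1;
        }
        printf("dictID = %u, header = %zu bytes\n",
               ZDICT_getDictID(dict, dictSize),
               ZDICT_getDictHeaderSize(dict, dictSize));
    }
    return 0;
}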
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover | ( | void * | dictBuffer, |
size_t | dictBufferCapacity, | ||
const void * | samplesBuffer, | ||
const size_t * | samplesSizes, | ||
unsigned | nbSamples, | ||
ZDICT_cover_params_t * | parameters | ||
) |
ZDICT_optimizeTrainFromBuffer_cover(): The same requirements as above hold for all the parameters except parameters
. This function tries many parameter combinations and picks the best parameters. *parameters
is filled with the best parameters found, dictionary constructed with those parameters is stored in dictBuffer
.
All of the parameters d, k, steps are optional. If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. If steps is zero, it defaults to its default value. If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].
Returns : size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). On success *parameters contains the parameters selected. See ZDICT_trainFromBuffer() for details on failure modes. Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus an additional 5 bytes per input byte for each thread.
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover | ( | void * | dictBuffer,
size_t | dictBufferCapacity, | ||
const void * | samplesBuffer, | ||
const size_t * | samplesSizes, | ||
unsigned | nbSamples, | ||
ZDICT_fastCover_params_t * | parameters | ||
) |
ZDICT_optimizeTrainFromBuffer_fastCover(): The same requirements as above hold for all the parameters except parameters
. This function tries many parameter combinations (specifically, k and d combinations) and picks the best parameters. *parameters
is filled with the best parameters found, dictionary constructed with those parameters is stored in dictBuffer
. All of the parameters d, k, steps, f, and accel are optional. If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. If steps is zero, it defaults to its default value. If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. If f is zero, the default value of 20 is used. If accel is zero, the default value of 1 is used.
Returns : size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). On success *parameters contains the parameters selected. See ZDICT_trainFromBuffer() for details on failure modes. Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
size_t ZDICT_trainFromBuffer | ( | void * | dictBuffer, |
size_t | dictBufferCapacity, | ||
const void * | samplesBuffer, | ||
const size_t * | samplesSizes, | ||
unsigned | nbSamples | ||
) |
ZDICT_trainFromBuffer(): Train a dictionary from an array of samples. Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, f=20, and accel=1. Samples must be stored concatenated in a single flat buffer samplesBuffer
, supplied with an array of sizes samplesSizes
, providing the size of each sample, in order. The resulting dictionary will be saved into dictBuffer
.
Returns : size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). Note: Dictionary training will fail if there are not enough samples to construct a dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). If dictionary training fails, you should use zstd without a dictionary, as the dictionary would've been ineffective anyways. If you believe your samples would benefit from a dictionary please open an issue with details, and we can look into it. Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. Tips: In general, a reasonable dictionary has a size of ~100 KB. It's possible to select a smaller or larger size, just by specifying dictBufferCapacity. In general, it's recommended to provide a few thousand samples, though this can vary a lot. It's recommended that the total size of all samples be about 100x the target size of the dictionary.
ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover | ( | void * | dictBuffer,
size_t | dictBufferCapacity, | ||
const void * | samplesBuffer, | ||
const size_t * | samplesSizes, | ||
unsigned | nbSamples, | ||
ZDICT_cover_params_t | parameters | ||
) |
ZDICT_trainFromBuffer_cover(): Train a dictionary from an array of samples using the COVER algorithm. Samples must be stored concatenated in a single flat buffer samplesBuffer
, supplied with an array of sizes samplesSizes
, providing the size of each sample, in order. The resulting dictionary will be saved into dictBuffer
.
Returns : size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). See ZDICT_trainFromBuffer() for details on failure modes. Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. Tips: In general, a reasonable dictionary has a size of ~100 KB. It's possible to select a smaller or larger size, just by specifying dictBufferCapacity. In general, it's recommended to provide a few thousand samples, though this can vary a lot. It's recommended that the total size of all samples be about 100x the target size of the dictionary.
ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover | ( | void * | dictBuffer,
size_t | dictBufferCapacity, | ||
const void * | samplesBuffer, | ||
const size_t * | samplesSizes, | ||
unsigned | nbSamples, | ||
ZDICT_fastCover_params_t | parameters | ||
) |
ZDICT_trainFromBuffer_fastCover(): Train a dictionary from an array of samples using a modified version of COVER algorithm. Samples must be stored concatenated in a single flat buffer samplesBuffer
, supplied with an array of sizes samplesSizes
, providing the size of each sample, in order. d and k are required; all other parameters are optional and will use default values if not provided. The resulting dictionary will be saved into dictBuffer.
Returns : size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). See ZDICT_trainFromBuffer() for details on failure modes. Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. Tips: In general, a reasonable dictionary has a size of ~100 KB. It's possible to select a smaller or larger size, just by specifying dictBufferCapacity. In general, it's recommended to provide a few thousand samples, though this can vary a lot. It's recommended that the total size of all samples be about 100x the target size of the dictionary.
size_t ZDICT_trainFromBuffer_legacy | ( | void * | dictBuffer,
size_t | dictBufferCapacity, | ||
const void * | samplesBuffer, | ||
const size_t * | samplesSizes, | ||
unsigned | nbSamples, | ||
ZDICT_legacy_params_t | parameters | ||
) |
ZDICT_trainFromBuffer_legacy(): Train a dictionary from an array of samples. Samples must be stored concatenated in a single flat buffer samplesBuffer
, supplied with an array of sizes samplesSizes
, providing the size of each sample, in order. The resulting dictionary will be saved into dictBuffer
. parameters
is optional and can be provided with values set to 0 to mean "default".
Returns : size of dictionary stored into dictBuffer (<= dictBufferCapacity) or an error code, which can be tested with ZDICT_isError(). See ZDICT_trainFromBuffer() for details on failure modes. Tips: In general, a reasonable dictionary has a size of ~100 KB. It's possible to select a smaller or larger size, just by specifying dictBufferCapacity. In general, it's recommended to provide a few thousand samples, though this can vary a lot. It's recommended that the total size of all samples be about 100x the target size of the dictionary. Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
|
static |
ZDICT_trainFromBuffer_unsafe_legacy() : Warning : samplesBuffer
must be followed by noisy guard band !!!
|
static |
ZDICT_tryMerge() : check if dictItem can be merged, do it if possible
ZSTD_compressionParameters ZSTD_adjustCParams | ( | ZSTD_compressionParameters | cPar, |
unsigned long long | srcSize, | ||
size_t | dictSize | ||
) |
|
static |
ZSTD_adjustCParams_internal() : optimize cPar
for a specified input (srcSize
and dictSize
). mostly downsize to reduce memory consumption and initialization latency. srcSize
can be ZSTD_CONTENTSIZE_UNKNOWN when not known. mode
is the mode for parameter adjustment. See docs for ZSTD_cParamMode_e
. note : srcSize==0
means 0! condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()).
MEM_STATIC U32 ZSTD_bitWeight | ( | U32 | stat | ) |
FORCE_INLINE_TEMPLATE size_t ZSTD_BtFindBestMatch | ( | ZSTD_matchState_t * | ms, |
const BYTE *const | ip, | ||
const BYTE *const | iLimit, | ||
size_t * | offBasePtr, | ||
const U32 | mls, | ||
const ZSTD_dictMode_e | dictMode | ||
) |
ZSTD_BtFindBestMatch() : Tree updater, providing best match
FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal | ( | ZSTD_match_t * | matches, |
ZSTD_matchState_t * | ms, | ||
U32 * | nextToUpdate3, | ||
const BYTE * | ip, | ||
const BYTE *const | iHighLimit, | ||
const U32 | rep[ZSTD_REP_NUM], | ||
U32 const | ll0, | ||
U32 const | lengthToBeat, | ||
const ZSTD_dictMode_e | dictMode, | ||
const U32 | mls | ||
) |
size_t ZSTD_buildBlockEntropyStats | ( | const seqStore_t * | seqStorePtr, |
const ZSTD_entropyCTables_t * | prevEntropy, | ||
ZSTD_entropyCTables_t * | nextEntropy, | ||
const ZSTD_CCtx_params * | cctxParams, | ||
ZSTD_entropyCTablesMetadata_t * | entropyMetadata, | ||
void * | workspace, | ||
size_t | wkspSize | ||
) |
ZSTD_buildBlockEntropyStats() : Builds entropy for the block. Requires workspace size ENTROPY_WORKSPACE_SIZE
|
static |
ZSTD_buildBlockEntropyStats_literals() : Builds entropy for the literals. Stores literals block type (raw, rle, compressed, repeat) and huffman description table to hufMetadata. Requires ENTROPY_WORKSPACE_SIZE workspace
|
static |
ZSTD_buildBlockEntropyStats_sequences() : Builds entropy for the sequences. Stores symbol compression modes and fse table to fseMetadata. Requires ENTROPY_WORKSPACE_SIZE wksp.
size_t ZSTD_buildCTable | ( | void * | dst, |
size_t | dstCapacity, | ||
FSE_CTable * | nextCTable, | ||
U32 | FSELog, | ||
symbolEncodingType_e | type, | ||
unsigned * | count, | ||
U32 | max, | ||
const BYTE * | codeTable, | ||
size_t | nbSeq, | ||
const S16 * | defaultNorm, | ||
U32 | defaultNormLog, | ||
U32 | defaultMax, | ||
const FSE_CTable * | prevCTable, | ||
size_t | prevCTableSize, | ||
void * | entropyWorkspace, | ||
size_t | entropyWorkspaceSize | ||
) |
void ZSTD_buildFSETable | ( | ZSTD_seqSymbol * | dt, |
const short * | normalizedCounter, | ||
unsigned | maxSymbolValue, | ||
const U32 * | baseValue, | ||
const U8 * | nbAdditionalBits, | ||
unsigned | tableLog, | ||
void * | wksp, | ||
size_t | wkspSize, | ||
int | bmi2 | ||
) |
FORCE_INLINE_TEMPLATE void ZSTD_buildFSETable_body | ( | ZSTD_seqSymbol * | dt, |
const short * | normalizedCounter, | ||
unsigned | maxSymbolValue, | ||
const U32 * | baseValue, | ||
const U8 * | nbAdditionalBits, | ||
unsigned | tableLog, | ||
void * | wksp, | ||
size_t | wkspSize | ||
) |
size_t ZSTD_CCtx_getParameter | ( | ZSTD_CCtx const * | cctx, |
ZSTD_cParameter | param, | ||
int * | value | ||
) |
size_t ZSTD_CCtx_loadDictionary | ( | ZSTD_CCtx * | cctx, |
const void * | dict, | ||
size_t | dictSize | ||
) |
ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ Create an internal CDict from dict buffer. Decompression will have to use the same dictionary. dict content will be copied internally. Use the experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead; in such a case, the dictionary buffer must outlive its users. Note : Use ZSTD_CCtx_loadDictionary_advanced() to precisely select how dictionary content must be interpreted. Note : This method does not benefit from LDM (long distance mode). If you want to employ LDM on some large dictionary content, prefer employing ZSTD_CCtx_refPrefix() described below.
size_t ZSTD_CCtx_loadDictionary_advanced | ( | ZSTD_CCtx * | cctx,
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_dictLoadMethod_e | dictLoadMethod, | ||
ZSTD_dictContentType_e | dictContentType | ||
) |
size_t ZSTD_CCtx_loadDictionary_byReference | ( | ZSTD_CCtx * | cctx, |
const void * | dict, | ||
size_t | dictSize | ||
) |
size_t ZSTD_CCtx_refCDict | ( | ZSTD_CCtx * | cctx, |
const ZSTD_CDict * | cdict | ||
) |
ZSTD_CCtx_refCDict() : Requires v1.4.0+ Reference a prepared dictionary, to be used for all future compressed frames. Note that compression parameters are enforced from within CDict, and supersede any compression parameter previously set within CCtx. The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. The dictionary will remain valid for future compressed frames using same CCtx.
size_t ZSTD_CCtx_refPrefix | ( | ZSTD_CCtx * | cctx, |
const void * | prefix, | ||
size_t | prefixSize | ||
) |
ZSTD_CCtx_refPrefix() : Requires v1.4.0+ Reference a prefix (single-usage dictionary) for next compressed frame. A prefix is only used once. Tables are discarded at end of frame (ZSTD_e_end). Decompression will need same prefix to properly regenerate data. Compressing with a prefix is similar in outcome as performing a diff and compressing it, but performs much faster, especially during decompression (compression speed is tunable with compression level). This method is compatible with LDM (long distance mode).
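A sketch of the diff-like usage described above (compressAgainstPrefix is a hypothetical helper name):

#include "zstd.h"

/* Use `base` as a single-use prefix dictionary for one frame.
 * The decompressor must reference the exact same prefix. */
size_t compressAgainstPrefix(ZSTD_CCtx* cctx,
                             void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
                             const void* base, size_t baseSize)
{
    size_t const r = ZSTD_CCtx_refPrefix(cctx, base, baseSize);
    if (ZSTD_isError(r)) return r;
    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    /* decompression side : call ZSTD_DCtx_refPrefix(dctx, base, baseSize)
     * before ZSTD_decompressDCtx(). */
}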
size_t ZSTD_CCtx_refPrefix_advanced | ( | ZSTD_CCtx * | cctx, |
const void * | prefix, | ||
size_t | prefixSize, | ||
ZSTD_dictContentType_e | dictContentType | ||
) |
size_t ZSTD_CCtx_refThreadPool | ( | ZSTD_CCtx * | cctx, |
ZSTD_threadPool * | pool | ||
) |
size_t ZSTD_CCtx_reset | ( | ZSTD_CCtx * | cctx, |
ZSTD_ResetDirective | reset | ||
) |
ZSTD_CCtx_reset() : Also dumps dictionary
size_t ZSTD_CCtx_setCParams | ( | ZSTD_CCtx * | cctx, |
ZSTD_compressionParameters | cparams | ||
) |
size_t ZSTD_CCtx_setFParams | ( | ZSTD_CCtx * | cctx, |
ZSTD_frameParameters | fparams | ||
) |
size_t ZSTD_CCtx_setParameter | ( | ZSTD_CCtx * | cctx, |
ZSTD_cParameter | param, | ||
int | value | ||
) |
ZSTD_CCtx_setParameter() : Set one compression parameter, selected by enum ZSTD_cParameter. All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds(). Providing a value beyond bounds will either clamp it, or trigger an error (depending on the parameter). Setting a parameter is generally only possible during frame initialization (before starting compression). Exception : when using multi-threading mode (nbWorkers >= 1), the following parameters can be updated during compression (within the same frame): compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. New parameters will be active for the next job only (after a flush()).
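A sketch of the advanced-API flow this note describes: set parameters on a CCtx, then compress with ZSTD_compress2() (level 19, the checksum flag, 4 workers and the helper name compressWithParams are arbitrary example choices):

#include "zstd.h"

size_t compressWithParams(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    size_t ret;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return (size_t)-1;  /* generic failure for this sketch */
    /* Each setter returns an error code testable with ZSTD_isError(). */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);  /* effective only if built with ZSTD_MULTITHREAD */
    ret = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return ret;  /* compressed size, or an error code */
}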
size_t ZSTD_CCtx_setParametersUsingCCtxParams | ( | ZSTD_CCtx * | cctx, |
const ZSTD_CCtx_params * | params | ||
) |
ZSTD_CCtx_setParametersUsingCCtxParams() : just applies params into cctx; no action is performed, parameters are merely stored. If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. This is possible even if a compression is ongoing, in which case the new parameters will be applied on the fly, starting with the next compression job.
size_t ZSTD_CCtx_setParams | ( | ZSTD_CCtx * | cctx, |
ZSTD_parameters | params | ||
) |
size_t ZSTD_CCtx_setPledgedSrcSize | ( | ZSTD_CCtx * | cctx, |
unsigned long long | pledgedSrcSize | ||
) |
ZSTD_CCtx_setPledgedSrcSize() : Total input data size to be compressed as a single frame. The value will be written in the frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag. This value will also be checked at end of frame, and will trigger an error if not respected.
void ZSTD_CCtx_trace | ( | ZSTD_CCtx * | cctx, |
size_t | extraCSize | ||
) |
ZSTD_CCtx_trace() : Trace the end of a compression call.
size_t ZSTD_CCtxParams_getParameter | ( | ZSTD_CCtx_params const * | CCtxParams, |
ZSTD_cParameter | param, | ||
int * | value | ||
) |
size_t ZSTD_CCtxParams_init | ( | ZSTD_CCtx_params * | cctxParams, |
int | compressionLevel | ||
) |
size_t ZSTD_CCtxParams_init_advanced | ( | ZSTD_CCtx_params * | cctxParams, |
ZSTD_parameters | params | ||
) |
size_t ZSTD_CCtxParams_setParameter | ( | ZSTD_CCtx_params * | CCtxParams, |
ZSTD_cParameter | param, | ||
int | value | ||
) |
void ZSTD_checkContinuity | ( | ZSTD_DCtx * | dctx, |
const void * | dst, | ||
size_t | dstSize | ||
) |
ZSTD_checkContinuity() : check if next dst
follows previous position, where decompression ended. If yes, do nothing (continue on current segment). If not, classify previous segment as "external dictionary", and start a new segment. This function cannot fail.
size_t ZSTD_checkCParams | ( | ZSTD_compressionParameters | cParams | ) |
ZSTD_checkCParams() : control CParam values remain within authorized range.
MEM_STATIC void ZSTD_checkDictValidity | ( | const ZSTD_window_t * | window, |
const void * | blockEnd, | ||
U32 | maxDist, | ||
U32 * | loadedDictEndPtr, | ||
const ZSTD_matchState_t ** | dictMatchStatePtr | ||
) |
|
static |
ZSTD_clampCParams() : clamp CParam values so they remain within the valid range.
MEM_STATIC int ZSTD_comparePackedTags | ( | size_t | packedTag1, |
size_t | packedTag2 | ||
) |
size_t ZSTD_compress | ( | void * | dst, |
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
int | compressionLevel | ||
) |
ZSTD_compress() : Compresses src
content as a single zstd compressed frame into already allocated dst
. NOTE: Providing dstCapacity >= ZSTD_compressBound(srcSize)
guarantees that zstd will have enough space to successfully compress the data.
Returns : compressed size written into dst (<= dstCapacity), or an error code if it fails (which can be tested using ZSTD_isError()).
size_t ZSTD_compress2 | ( | ZSTD_CCtx * | cctx,
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
ZSTD_compress2() : Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. ZSTD_compress2() always starts a new frame. Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
NOTE: Providing dstCapacity >= ZSTD_compressBound(srcSize) guarantees that zstd will have enough space to successfully compress the data, though it is possible that it fails for other reasons. Returns : compressed size written into dst (<= dstCapacity), or an error code if it fails (which can be tested using ZSTD_isError()).
size_t ZSTD_compress_advanced | ( | ZSTD_CCtx * | cctx,
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_parameters | params | ||
) |
size_t ZSTD_compress_advanced_internal | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
const void * | dict, | ||
size_t | dictSize, | ||
const ZSTD_CCtx_params * | params | ||
) |
|
static |
ZSTD_compress_frameChunk() : Compress a chunk of data into one or multiple blocks. All blocks will be terminated, all input will be consumed. Function will issue an error if there is not enough dstCapacity
to hold the compressed content. Frame is supposed already started (header already produced)
|
static |
ZSTD_compress_insertDictionary() :
size_t ZSTD_compress_usingCDict | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
const ZSTD_CDict * | cdict | ||
) |
ZSTD_compress_usingCDict() : Compression using a digested Dictionary. Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. Note that compression parameters are decided at CDict creation time while frame parameters are hardcoded
size_t ZSTD_compress_usingCDict_advanced | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
const ZSTD_CDict * | cdict, | ||
ZSTD_frameParameters | fParams | ||
) |
ZSTD_compress_usingCDict_advanced(): This function is DEPRECATED.
|
static |
ZSTD_compress_usingCDict_internal(): Implementation of various ZSTD_compress_usingCDict* functions.
size_t ZSTD_compress_usingDict | ( | ZSTD_CCtx * | ctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
const void * | dict, | ||
size_t | dictSize, | ||
int | compressionLevel | ||
) |
ZSTD_compress_usingDict() : Compression at an explicit compression level using a Dictionary. A dictionary can be any arbitrary data segment (also called a prefix), or a buffer with specified information (see zdict.h). Note : This function loads the dictionary, resulting in significant startup delay. It's intended for a dictionary used only once. Note 2 : When dict == NULL || dictSize < 8
no dictionary is used.
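A sketch of one-shot dictionary compression as described above (compressOnceWithDict is a hypothetical helper; level 3 is an arbitrary choice):

#include <stdlib.h>
#include "zstd.h"

/* Compress src once with a raw dictionary buffer at level 3.
 * The dictionary is reloaded on every call, so prefer a ZSTD_CDict
 * (see ZSTD_createCDict below) when the dictionary is reused. */
size_t compressOnceWithDict(const void* src, size_t srcSize,
                            const void* dict, size_t dictSize,
                            void** cDst)  /* out : malloc'd compressed buffer */
{
    size_t const bound = ZSTD_compressBound(srcSize);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize = (size_t)-1;
    *cDst = malloc(bound);
    if (cctx && *cDst)
        cSize = ZSTD_compress_usingDict(cctx, *cDst, bound,
                                        src, srcSize, dict, dictSize, 3);
    ZSTD_freeCCtx(cctx);
    return cSize;  /* compressed size, or an error code */
}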
size_t ZSTD_compressBegin | ( | ZSTD_CCtx * | cctx, |
int | compressionLevel | ||
) |
size_t ZSTD_compressBegin_advanced | ( | ZSTD_CCtx * | cctx, |
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_parameters | params, | ||
unsigned long long | pledgedSrcSize | ||
) |
ZSTD_compressBegin_advanced() :
size_t ZSTD_compressBegin_advanced_internal | ( | ZSTD_CCtx * | cctx, |
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_dictContentType_e | dictContentType, | ||
ZSTD_dictTableLoadMethod_e | dtlm, | ||
const ZSTD_CDict * | cdict, | ||
const ZSTD_CCtx_params * | params, | ||
unsigned long long | pledgedSrcSize | ||
) |
|
static |
ZSTD_compressBegin_internal() : Assumption : either @dict OR @cdict (or none) is non-NULL, never both
size_t ZSTD_compressBegin_usingCDict | ( | ZSTD_CCtx * | cctx, |
const ZSTD_CDict * | cdict | ||
) |
size_t ZSTD_compressBegin_usingCDict_advanced | ( | ZSTD_CCtx *const | cctx, |
const ZSTD_CDict *const | cdict, | ||
ZSTD_frameParameters const | fParams, | ||
unsigned long long const | pledgedSrcSize | ||
) |
size_t ZSTD_compressBegin_usingCDict_deprecated | ( | ZSTD_CCtx * | cctx, |
const ZSTD_CDict * | cdict | ||
) |
size_t ZSTD_compressBegin_usingDict | ( | ZSTD_CCtx * | cctx, |
const void * | dict, | ||
size_t | dictSize, | ||
int | compressionLevel | ||
) |
size_t ZSTD_compressBlock | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btlazy2 | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
Used in ZSTD_reduceIndex() : preemptively increases the value of ZSTD_DUBT_UNSORTED_MARK.
size_t ZSTD_compressBlock_btlazy2_dictMatchState | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btlazy2_extDict | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btopt | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btopt_dictMatchState | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btopt_extDict | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btultra | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btultra2 | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btultra_dictMatchState | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_btultra_extDict | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_deprecated | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_doubleFast | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_doubleFast_dictMatchState | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize, | ||
U32 const | mls | ||
) |
size_t ZSTD_compressBlock_doubleFast_extDict | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_doubleFast_noDict_generic | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize, | ||
U32 const | mls | ||
) |
size_t ZSTD_compressBlock_fast | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_fast_dictMatchState | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_dictMatchState_generic | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize, | ||
U32 const | mls, | ||
U32 const | hasStep | ||
) |
size_t ZSTD_compressBlock_fast_extDict | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_noDict_generic | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize, | ||
U32 const | mls, | ||
U32 const | hasStep | ||
) |
If you squint hard enough (and ignore repcodes), the search operation at any given position is broken into 4 stages: hashing the current position, looking the hash up in the table, reading the candidate match, and comparing it against the current bytes.
Each of these steps involves a memory read at an address which is computed from the previous step. This means these steps must be sequenced and their latencies are cumulative.
Rather than do 1->2->3->4 sequentially for a single position before moving onto the next, this implementation interleaves these operations across the next few positions:
R = Repcode Read & Compare
H = Hash
T = Table Lookup
M = Match Read & Compare

Pos | Time -->
----+-------------------
N   | ... M
N+1 | ...   TM
N+2 | R H   T M
N+3 |   H    TM
N+4 | R H     T M
N+5 |   H       ...
N+6 | R         ...
This is very much analogous to the pipelining of execution in a CPU. And just like a CPU, we have to dump the pipeline when we find a match (i.e., take a branch).
When this happens, we throw away our current state, and do the following prep to re-enter the loop:
Pos | Time -->
----+-----------
N   | H T
N+1 |   H
This is also the work we do at the beginning to enter the loop initially.
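To make the interleaving concrete, here is a toy, self-contained C sketch. It is not the actual zstd code: toyHash, the table size, and the fixed 4-byte match length are illustrative assumptions. It hashes position N+1 while the table entry for position N is read and compared, mirroring the H/T/M staggering above:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TBL_LOG  12
#define TBL_SIZE (1u << TBL_LOG)

static uint32_t toyHash(const uint8_t* p)
{
    uint32_t v; memcpy(&v, p, 4);
    return (v * 2654435761u) >> (32 - TBL_LOG);
}

int main(void)
{
    const uint8_t src[] = "abcdabcd_abcdabcd_abcdabcd_abcdabcd";
    size_t const srcSize = sizeof(src) - 1;
    static uint32_t table[TBL_SIZE];  /* position+1 of last occurrence, 0 = empty */
    size_t pos = 0;
    uint32_t h0 = toyHash(src + pos);           /* stage H for the first position */
    while (pos + 5 < srcSize) {
        uint32_t const h1 = toyHash(src + pos + 1);  /* stage H for the next position */
        uint32_t const cand = table[h0];             /* stage T for this position */
        table[h0] = (uint32_t)pos + 1;
        if (cand && !memcmp(src + cand - 1, src + pos, 4))  /* stage M */
            printf("match: pos %zu <- pos %u\n", pos, cand - 1);
        h0 = h1;   /* the next iteration's lookup is already computed */
        pos += 1;
    }
    return 0;
}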
size_t ZSTD_compressBlock_greedy | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_greedy_dictMatchState | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_greedy_dictMatchState_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_greedy_extDict | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_greedy_extDict_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_greedy_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy2 | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy2_dictMatchState | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy2_dictMatchState_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy2_extDict | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy2_extDict_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy2_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy_dictMatchState | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy_dictMatchState_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressBlock_lazy_extDict | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_extDict_generic | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
const void * | src, | ||
size_t | srcSize, | ||
const searchMethod_e | searchMethod, | ||
const U32 | depth | ||
) |
size_t ZSTD_compressBlock_lazy_extDict_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_generic | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
const void * | src, | ||
size_t | srcSize, | ||
const searchMethod_e | searchMethod, | ||
const U32 | depth, | ||
ZSTD_dictMode_e const | dictMode | ||
) |
size_t ZSTD_compressBlock_lazy_row | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
void const * | src, | ||
size_t | srcSize | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_opt_generic | ( | ZSTD_matchState_t * | ms, |
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
const void * | src, | ||
size_t | srcSize, | ||
const int | optLevel, | ||
const ZSTD_dictMode_e | dictMode | ||
) |
size_t ZSTD_compressBound | ( | size_t | srcSize | ) |
size_t ZSTD_compressCCtx | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
int | compressionLevel | ||
) |
ZSTD_compressCCtx() : Same as ZSTD_compress(), using an explicit ZSTD_CCtx. Important : in order to behave similarly to ZSTD_compress(), this function compresses at the requested compression level, ignoring any other parameter. If any advanced parameter was set using the advanced API, they will all be reset. Only compressionLevel remains.
size_t ZSTD_compressContinue | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressContinue_public | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressEnd | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressEnd_public | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressLiterals | ( | void * | dst, |
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
void * | entropyWorkspace, | ||
size_t | entropyWorkspaceSize, | ||
const ZSTD_hufCTables_t * | prevHuf, | ||
ZSTD_hufCTables_t * | nextHuf, | ||
ZSTD_strategy | strategy, | ||
int | disableLiteralCompression, | ||
int | suspectUncompressible, | ||
int | bmi2 | ||
) |
size_t ZSTD_compressRleLiteralsBlock | ( | void * | dst, |
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressSequences | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const ZSTD_Sequence * | inSeqs, | ||
size_t | inSeqsSize, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_compressStream | ( | ZSTD_CStream * | zcs, |
ZSTD_outBuffer * | output, | ||
ZSTD_inBuffer * | input | ||
) |
Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue). NOTE: The return value is different. ZSTD_compressStream() returns a hint for the next read size (if non-zero and not an error). ZSTD_compressStream2() returns the minimum nb of bytes left to flush (if non-zero and not an error).
size_t ZSTD_compressStream2 | ( | ZSTD_CCtx * | cctx, |
ZSTD_outBuffer * | output, | ||
ZSTD_inBuffer * | input, | ||
ZSTD_EndDirective | endOp | ||
) |
ZSTD_compressStream2() : Requires v1.4.0+ Behaves about the same as ZSTD_compressStream, with additional control on end directive.
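A sketch of the streaming loop for a single in-memory buffer, ending the frame with ZSTD_e_end (streamCompress is a hypothetical helper; real applications usually feed multiple chunks with ZSTD_e_continue first):

#include "zstd.h"

size_t streamCompress(ZSTD_CCtx* cctx,
                      void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize)
{
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    size_t remaining;
    do {
        remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
    } while (remaining != 0);  /* 0 => frame fully flushed */
    return output.pos;         /* number of compressed bytes produced */
}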
size_t ZSTD_compressStream2_simpleArgs | ( | ZSTD_CCtx * | cctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
size_t * | dstPos, | ||
const void * | src, | ||
size_t | srcSize, | ||
size_t * | srcPos, | ||
ZSTD_EndDirective | endOp | ||
) |
|
static |
ZSTD_compressStream_generic(): internal function for all compressStream() variants
|
static |
ZSTD_compressSubBlock() : Compresses a single sub-block.
|
static |
ZSTD_compressSubBlock_literal() : Compresses literals section for a sub-block. When we have to write the Huffman table we will sometimes choose a header size larger than necessary. This is because we have to pick the header size before we know the table size + compressed size, so we have a bound on the table size. If we guessed incorrectly, we fall back to uncompressed literals.
We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded in writing the header, otherwise it is set to 0.
hufMetadata->hType has literals block type info. If it is set_basic, all sub-blocks' literals sections will be Raw_Literals_Block. If it is set_rle, all sub-blocks' literals sections will be RLE_Literals_Block. If it is set_compressed, the first sub-block's literals section will be Compressed_Literals_Block, and the following sub-blocks' literals sections will be Treeless_Literals_Block.
|
static |
ZSTD_compressSubBlock_multi() : Breaks super-block into multiple sub-blocks and compresses them. Entropy will be written to the first block. The following blocks will use repeat mode to compress. All sub-blocks are compressed blocks (no raw or rle blocks).
|
static |
ZSTD_compressSubBlock_sequences() : Compresses sequences section for a sub-block. fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have symbol compression modes for the super-block. The first successfully compressed block will have these in its header. We set entropyWritten=1 when we succeed in compressing the sequences. The following sub-blocks will always have repeat mode.
size_t ZSTD_compressSuperBlock | ( | ZSTD_CCtx * | zc, |
void * | dst, | ||
size_t | dstCapacity, | ||
void const * | src, | ||
size_t | srcSize, | ||
unsigned | lastBlock | ||
) |
size_t ZSTD_copyCCtx | ( | ZSTD_CCtx * | dstCCtx, |
const ZSTD_CCtx * | srcCCtx, | ||
unsigned long long | pledgedSrcSize | ||
) |
ZSTD_copyCCtx() : Duplicate an existing context srcCCtx
into another one dstCCtx
. Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). pledgedSrcSize==0 means "unknown".
|
static |
ZSTD_copyCCtx_internal() : Duplicate an existing context srcCCtx
into another one dstCCtx
. Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). The "context", in this case, refers to the hash and chain tables, entropy tables, and dictionary references. windowLog
value is enforced if != 0, otherwise value is copied from srcCCtx.
void ZSTD_copyDDictParameters | ( | ZSTD_DCtx * | dctx, |
const ZSTD_DDict * | ddict | ||
) |
size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim | ( | ZSTD_CCtx * | cctx, |
ZSTD_sequencePosition * | seqPos, | ||
const ZSTD_Sequence *const | inSeqs, | ||
size_t | inSeqsSize, | ||
const void * | src, | ||
size_t | blockSize, | ||
ZSTD_paramSwitch_e | externalRepSearch | ||
) |
size_t ZSTD_copySequencesToSeqStoreNoBlockDelim | ( | ZSTD_CCtx * | cctx, |
ZSTD_sequencePosition * | seqPos, | ||
const ZSTD_Sequence *const | inSeqs, | ||
size_t | inSeqsSize, | ||
const void * | src, | ||
size_t | blockSize, | ||
ZSTD_paramSwitch_e | externalRepSearch | ||
) |
MEM_STATIC size_t ZSTD_count | ( | const BYTE * | pIn, |
const BYTE * | pMatch, | ||
const BYTE *const | pInLimit | ||
) |
MEM_STATIC size_t ZSTD_count_2segments | ( | const BYTE * | ip, |
const BYTE * | match, | ||
const BYTE * | iEnd, | ||
const BYTE * | mEnd, | ||
const BYTE * | iStart | ||
) |
ZSTD_count_2segments() : can count match length with ip & match in 2 different segments. Convention : on reaching mEnd, the match count continues, starting again from iStart.
MEM_STATIC unsigned ZSTD_countLeadingZeros32 | ( | U32 | val | ) |
MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback | ( | U32 | val | ) |
MEM_STATIC unsigned ZSTD_countLeadingZeros64 | ( | U64 | val | ) |
MEM_STATIC unsigned ZSTD_countTrailingZeros32 | ( | U32 | val | ) |
MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback | ( | U32 | val | ) |
MEM_STATIC unsigned ZSTD_countTrailingZeros64 | ( | U64 | val | ) |
ZSTD_bounds ZSTD_cParam_getBounds | ( | ZSTD_cParameter | cParam | ) |
ZSTD_cParam_getBounds() : All parameters must belong to an interval with lower and upper bounds, otherwise they will either trigger an error or be automatically clamped.
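A sketch of querying bounds before setting a parameter (printLevelBounds is a hypothetical helper):

#include <stdio.h>
#include "zstd.h"

void printLevelBounds(void)
{
    ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
    if (!ZSTD_isError(b.error))
        printf("compression level range : [%d, %d]\n", b.lowerBound, b.upperBound);
}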
MEM_STATIC int ZSTD_cParam_withinBounds | ( | ZSTD_cParameter | cParam, |
int | value | ||
) |
MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid | ( | void | ) |
MEM_STATIC int ZSTD_cpuSupportsBmi2 | ( | void | ) |
ZSTD_CCtx* ZSTD_createCCtx_advanced | ( | ZSTD_customMem | customMem | ) |
ZSTD_CDict* ZSTD_createCDict | ( | const void * | dictBuffer, |
size_t | dictSize, | ||
int | compressionLevel | ||
) |
ZSTD_createCDict() : When compressing multiple messages or blocks using the same dictionary, it's recommended to digest the dictionary only once, since it's a costly operation. ZSTD_createCDict() will create a state from digesting a dictionary. The resulting state can be used for future compression operations with very limited startup cost. ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. Note 1 : Consider experimental function ZSTD_createCDict_byReference()
if you prefer to not duplicate @dictBuffer content. Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, in which case the only thing that it transports is the @compressionLevel. This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, expecting a ZSTD_CDict parameter with any data, including those without a known dictionary.
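A sketch of the digest-once, reuse-many pattern described above (compressManyWithCDict is a hypothetical helper; level 3 is arbitrary):

#include "zstd.h"

size_t compressManyWithCDict(void* dst, size_t dstCapacity,
                             const void* srcs[], const size_t srcSizes[], size_t n,
                             const void* dictBuffer, size_t dictSize)
{
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    size_t written = 0;
    if (cdict && cctx) {
        size_t i;
        for (i = 0; i < n; i++) {  /* one frame per input, all sharing the CDict */
            size_t const cSize = ZSTD_compress_usingCDict(
                    cctx, (char*)dst + written, dstCapacity - written,
                    srcs[i], srcSizes[i], cdict);
            if (ZSTD_isError(cSize)) { written = cSize; break; }
            written += cSize;
        }
    }
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    return written;  /* total bytes written, or an error code */
}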
ZSTD_CDict* ZSTD_createCDict_advanced | ( | const void * | dictBuffer, |
size_t | dictSize, | ||
ZSTD_dictLoadMethod_e | dictLoadMethod, | ||
ZSTD_dictContentType_e | dictContentType, | ||
ZSTD_compressionParameters | cParams, | ||
ZSTD_customMem | customMem | ||
) |
ZSTD_CDict* ZSTD_createCDict_advanced2 | ( | const void * | dict, |
size_t | dictSize, | ||
ZSTD_dictLoadMethod_e | dictLoadMethod, | ||
ZSTD_dictContentType_e | dictContentType, | ||
const ZSTD_CCtx_params * | originalCctxParams, | ||
ZSTD_customMem | customMem | ||
) |
ZSTD_CDict* ZSTD_createCDict_byReference | ( | const void * | dict, |
size_t | dictSize, | ||
int | compressionLevel | ||
) |
ZSTD_CStream* ZSTD_createCStream | ( | void | ) |
ZSTD_CStream* ZSTD_createCStream_advanced | ( | ZSTD_customMem | customMem | ) |
ZSTD_DCtx* ZSTD_createDCtx_advanced | ( | ZSTD_customMem | customMem | ) |
ZSTD_DDict* ZSTD_createDDict | ( | const void * | dict, |
size_t | dictSize | ||
) |
ZSTD_createDDict() : Create a digested dictionary, to start decompression without startup delay. dict content is copied inside the DDict. Consequently, dict can be released after ZSTD_DDict creation.
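A sketch of the decompression counterpart of the CDict pattern (decompressWithDDict is a hypothetical helper):

#include "zstd.h"

size_t decompressWithDDict(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize,
                           const void* dict, size_t dictSize)
{
    ZSTD_DDict* const ddict = ZSTD_createDDict(dict, dictSize);
    ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
    size_t dSize = (size_t)-1;  /* generic failure for this sketch */
    if (ddict && dctx)
        dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
                                           src, srcSize, ddict);
    ZSTD_freeDCtx(dctx);
    ZSTD_freeDDict(ddict);
    return dSize;  /* decompressed size, or an error code */
}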
ZSTD_DDict* ZSTD_createDDict_advanced | ( | const void * | dict, |
size_t | dictSize, | ||
ZSTD_dictLoadMethod_e | dictLoadMethod, | ||
ZSTD_dictContentType_e | dictContentType, | ||
ZSTD_customMem | customMem | ||
) |
ZSTD_DDict* ZSTD_createDDict_byReference | ( | const void * | dictBuffer, |
size_t | dictSize | ||
) |
ZSTD_createDDict_byReference() : Create a digested dictionary, to start decompression without startup delay. Dictionary content is simply referenced, it will be accessed during decompression. Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer)
ZSTD_DStream* ZSTD_createDStream | ( | void | ) |
ZSTD_DStream* ZSTD_createDStream_advanced | ( | ZSTD_customMem | customMem | ) |
size_t ZSTD_crossEntropyCost | ( | short const * | norm, |
unsigned | accuracyLog, | ||
unsigned const * | count, | ||
unsigned const | max | ||
) |
size_t ZSTD_CStreamInSize | ( | void | ) |
size_t ZSTD_CStreamOutSize | ( | void | ) |
MEM_STATIC void* ZSTD_customCalloc | ( | size_t | size, |
ZSTD_customMem | customMem | ||
) |
MEM_STATIC void ZSTD_customFree | ( | void * | ptr, |
ZSTD_customMem | customMem | ||
) |
MEM_STATIC void* ZSTD_customMalloc | ( | size_t | size, |
ZSTD_customMem | customMem | ||
) |
MEM_STATIC size_t ZSTD_cwksp_align | ( | size_t | size, |
size_t const | align | ||
) |
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size | ( | size_t | size | ) |
MEM_STATIC size_t ZSTD_cwksp_alloc_size | ( | size_t | size | ) |
Use this to determine how much space in the workspace we will consume to allocate this object. (Normally it should be exactly the size of the object, but under special conditions, like ASAN, where we pad each object, it might be larger.)
Since tables aren't currently redzoned, you don't need to call through this to figure out how much space you need for the matchState tables. Everything else is though.
Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
MEM_STATIC void ZSTD_cwksp_assert_internal_consistency | ( | ZSTD_cwksp * | ws | ) |
MEM_STATIC size_t ZSTD_cwksp_available_space | ( | ZSTD_cwksp * | ws | ) |
MEM_STATIC void ZSTD_cwksp_bump_oversized_duration | ( | ZSTD_cwksp * | ws, |
size_t | additionalNeededSpace | ||
) |
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr | ( | void * | ptr, |
const size_t | alignBytes | ||
) |
MEM_STATIC int ZSTD_cwksp_check_available | ( | ZSTD_cwksp * | ws, |
size_t | additionalNeededSpace | ||
) |
MEM_STATIC int ZSTD_cwksp_check_too_large | ( | ZSTD_cwksp * | ws, |
size_t | additionalNeededSpace | ||
) |
MEM_STATIC int ZSTD_cwksp_check_wasteful | ( | ZSTD_cwksp * | ws, |
size_t | additionalNeededSpace | ||
) |
MEM_STATIC void ZSTD_cwksp_clean_tables | ( | ZSTD_cwksp * | ws | ) |
MEM_STATIC void ZSTD_cwksp_clear | ( | ZSTD_cwksp * | ws | ) |
MEM_STATIC void ZSTD_cwksp_clear_tables | ( | ZSTD_cwksp * | ws | ) |
MEM_STATIC size_t ZSTD_cwksp_create | ( | ZSTD_cwksp * | ws, |
size_t | size, | ||
ZSTD_customMem | customMem | ||
) |
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds | ( | const ZSTD_cwksp *const | ws, |
size_t const | estimatedSpace | ||
) |
MEM_STATIC void ZSTD_cwksp_free | ( | ZSTD_cwksp * | ws, |
ZSTD_customMem | customMem | ||
) |
MEM_STATIC void ZSTD_cwksp_init | ( | ZSTD_cwksp * | ws, |
void * | start, | ||
size_t | size, | ||
ZSTD_cwksp_static_alloc_e | isStatic | ||
) |
MEM_STATIC void * ZSTD_cwksp_initialAllocStart | ( | ZSTD_cwksp * | ws | ) |
MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase | ( | ZSTD_cwksp * | ws, |
ZSTD_cwksp_alloc_phase_e | phase | ||
) |
MEM_STATIC void ZSTD_cwksp_mark_tables_clean | ( | ZSTD_cwksp * | ws | ) |
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty | ( | ZSTD_cwksp * | ws | ) |
MEM_STATIC void ZSTD_cwksp_move | ( | ZSTD_cwksp * | dst, |
ZSTD_cwksp * | src | ||
) |
MEM_STATIC int ZSTD_cwksp_owns_buffer | ( | const ZSTD_cwksp * | ws, |
const void * | ptr | ||
) |
MEM_STATIC void* ZSTD_cwksp_reserve_aligned | ( | ZSTD_cwksp * | ws, |
size_t | bytes | ||
) |
MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once | ( | ZSTD_cwksp * | ws, |
size_t | bytes | ||
) |
Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). This memory has been initialized at least once in the past. This doesn't mean it has been initialized this time, and it might contain data from previous operations. The main usage is for algorithms that might need read access into uninitialized memory. The algorithm must maintain safety under these conditions and must make sure it doesn't leak any of the past data (directly or in side channels).
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer | ( | ZSTD_cwksp * | ws, |
size_t | bytes | ||
) |
MEM_STATIC int ZSTD_cwksp_reserve_failed | ( | const ZSTD_cwksp * | ws | ) |
MEM_STATIC void* ZSTD_cwksp_reserve_internal | ( | ZSTD_cwksp * | ws, |
size_t | bytes, | ||
ZSTD_cwksp_alloc_phase_e | phase | ||
) |
MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space | ( | ZSTD_cwksp * | ws, |
size_t const | bytes | ||
) |
MEM_STATIC void* ZSTD_cwksp_reserve_object | ( | ZSTD_cwksp * | ws, |
size_t | bytes | ||
) |
MEM_STATIC void* ZSTD_cwksp_reserve_table | ( | ZSTD_cwksp * | ws, |
size_t | bytes | ||
) |
MEM_STATIC size_t ZSTD_cwksp_sizeof | ( | const ZSTD_cwksp * | ws | ) |
MEM_STATIC size_t ZSTD_cwksp_slack_space_required | ( | void | ) |
MEM_STATIC size_t ZSTD_cwksp_used | ( | const ZSTD_cwksp * | ws | ) |
U32 ZSTD_cycleLog | ( | U32 | hashLog, |
ZSTD_strategy | strat | ||
) |
ZSTD_cycleLog() : condition for correct operation : hashLog > 1
MEM_STATIC int ZSTD_DCtx_get_bmi2 | ( | const struct ZSTD_DCtx_s * | dctx | ) |
size_t ZSTD_DCtx_getParameter | ( | ZSTD_DCtx * | dctx, |
ZSTD_dParameter | param, | ||
int * | value | ||
) |
size_t ZSTD_DCtx_loadDictionary | ( | ZSTD_DCtx * | dctx, |
const void * | dict, | ||
size_t | dictSize | ||
) |
ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ Create an internal DDict from dict buffer, to be used to decompress all future frames. The dictionary remains valid for all future frames, until explicitly invalidated, or a new dictionary is loaded.
dict content will be copied internally, so dict can be released after loading. Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead. Note : Use ZSTD_DCtx_loadDictionary_advanced() to take control of how dictionary content is loaded and interpreted.
size_t ZSTD_DCtx_loadDictionary_advanced | ( | ZSTD_DCtx * | dctx,
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_dictLoadMethod_e | dictLoadMethod, | ||
ZSTD_dictContentType_e | dictContentType | ||
) |
size_t ZSTD_DCtx_loadDictionary_byReference | ( | ZSTD_DCtx * | dctx, |
const void * | dict, | ||
size_t | dictSize | ||
) |
size_t ZSTD_DCtx_refDDict | ( | ZSTD_DCtx * | dctx, |
const ZSTD_DDict * | ddict | ||
) |
ZSTD_DCtx_refDDict() : Requires v1.4.0+ Reference a prepared dictionary, to be used to decompress next frames. The dictionary remains active for decompression of future frames using same DCtx.
If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function will store the DDict references in a table, and the DDict used for decompression will be determined at decompression time, as per the dict ID in the frame. The memory for the table is allocated on the first call to refDDict, and can be freed with ZSTD_freeDCtx().
If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary will be managed, and referencing a dictionary effectively "discards" any previous one.
size_t ZSTD_DCtx_refPrefix | ( | ZSTD_DCtx * | dctx, |
const void * | prefix, | ||
size_t | prefixSize | ||
) |
ZSTD_DCtx_refPrefix() : Requires v1.4.0+ Reference a prefix (single-usage dictionary) to decompress next frame. This is the reverse operation of ZSTD_CCtx_refPrefix(), and must use the same prefix as the one used during compression. Prefix is only used once. Reference is discarded at end of frame. End of frame is reached when ZSTD_decompressStream() returns 0.
size_t ZSTD_DCtx_refPrefix_advanced | ( | ZSTD_DCtx * | dctx, |
const void * | prefix, | ||
size_t | prefixSize, | ||
ZSTD_dictContentType_e | dictContentType | ||
) |
size_t ZSTD_DCtx_reset | ( | ZSTD_DCtx * | dctx, |
ZSTD_ResetDirective | reset | ||
) |
ZSTD_DCtx_reset() : Return a DCtx to clean state. Session and parameters can be reset jointly or separately. Parameters can only be reset when no active frame is being decompressed.
size_t ZSTD_DCtx_setFormat | ( | ZSTD_DCtx * | dctx, |
ZSTD_format_e | format | ||
) |
size_t ZSTD_DCtx_setMaxWindowSize | ( | ZSTD_DCtx * | dctx, |
size_t | maxWindowSize | ||
) |
size_t ZSTD_DCtx_setParameter | ( | ZSTD_DCtx * | dctx, |
ZSTD_dParameter | param, | ||
int | value | ||
) |
ZSTD_DCtx_setParameter() : Set one decompression parameter, selected by enum ZSTD_dParameter. All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds(). Providing a value beyond bounds will either clamp it, or trigger an error (depending on the parameter). Setting a parameter is only possible during frame initialization (before starting decompression).
const void * ZSTD_DDict_dictContent | ( | const ZSTD_DDict * | ddict | ) |
size_t ZSTD_DDict_dictSize | ( | const ZSTD_DDict * | ddict | ) |
|
static |
ZSTD_decodeFrameHeader() : headerSize must be the size provided by ZSTD_frameHeaderSize(). If multiple DDict references are enabled, this will also choose the correct DDict to use.
size_t ZSTD_decodeLiteralsBlock | ( | ZSTD_DCtx * | dctx, |
const void * | src, | ||
size_t | srcSize, | ||
void * | dst, | ||
size_t | dstCapacity, | ||
const streaming_operation | streaming | ||
) |
ZSTD_decodeLiteralsBlock() : Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write.
size_t ZSTD_decodeSeqHeaders | ( | ZSTD_DCtx * | dctx, |
int * | nbSeqPtr, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
ZSTD_decodeSeqHeaders() : decode sequence header from src
FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence | ( | seqState_t * | seqState, |
const ZSTD_longOffset_e | longOffsets | ||
) |
size_t ZSTD_decodingBufferSize_min | ( | unsigned long long | windowSize, |
unsigned long long | frameContentSize | ||
) |
size_t ZSTD_decompress | ( | void * | dst, |
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | compressedSize | ||
) |
ZSTD_decompress() : compressedSize
: must be the exact size of some number of compressed and/or skippable frames. dstCapacity is an upper bound of the original size to regenerate. If the user cannot infer a maximum upper bound, it's better to use streaming mode to decompress data.
Returns : the number of bytes decompressed into dst (<= dstCapacity), or an error code if it fails (which can be tested using ZSTD_isError()).
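A sketch that sizes the destination from the frame header before the one-shot call (decompressWhole is a hypothetical helper):

#include <stdlib.h>
#include "zstd.h"

void* decompressWhole(const void* src, size_t compressedSize, size_t* dSizeOut)
{
    unsigned long long const rSize = ZSTD_getFrameContentSize(src, compressedSize);
    if (rSize == ZSTD_CONTENTSIZE_ERROR || rSize == ZSTD_CONTENTSIZE_UNKNOWN)
        return NULL;  /* unknown size : fall back to streaming decompression */
    {   void* const dst = malloc((size_t)rSize);
        if (dst != NULL) {
            size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, src, compressedSize);
            if (ZSTD_isError(dSize)) { free(dst); return NULL; }
            *dSizeOut = dSize;
            return dst;
        }
    }
    return NULL;
}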
size_t ZSTD_decompress_usingDDict | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
const ZSTD_DDict * | ddict | ||
) |
ZSTD_decompress_usingDDict() : Decompression using a pre-digested Dictionary. Uses the dictionary without significant overhead.
size_t ZSTD_decompress_usingDict | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
const void * | dict, | ||
size_t | dictSize | ||
) |
ZSTD_decompress_usingDict() : Decompression using a known Dictionary. Dictionary must be identical to the one used during compression. Note : This function loads the dictionary, resulting in significant startup delay. It's intended for a dictionary used only once. Note : When dict == NULL || dictSize < 8, no dictionary is used.
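A sketch contrasting this entry point with ZSTD_decompress_usingDDict() above (dictBuf/dictSize, dst/dstCap and src/srcSize are assumed inputs): when the same dictionary is reused, digest it once into a DDict instead of reloading it on every call:

    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    /* one-off use : reloads the dictionary on every call (startup delay each time) */
    size_t const r1 = ZSTD_decompress_usingDict(dctx, dst, dstCap, src, srcSize, dictBuf, dictSize);
    /* repeated use : pre-digest once, then decompress without significant overhead */
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
    size_t const r2 = ZSTD_decompress_usingDDict(dctx, dst, dstCap, src, srcSize, ddict);
    ZSTD_freeDDict(ddict);
    ZSTD_freeDCtx(dctx);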
size_t ZSTD_decompressBegin_usingDDict | ( | ZSTD_DCtx * | dctx, |
const ZSTD_DDict * | ddict | ||
) |
size_t ZSTD_decompressBegin_usingDict | ( | ZSTD_DCtx * | dctx, |
const void * | dict, | ||
size_t | dictSize | ||
) |
size_t ZSTD_decompressBlock | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_decompressBlock_deprecated | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
size_t ZSTD_decompressBlock_internal | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
const int | frame, | ||
const streaming_operation | streaming | ||
) |
unsigned long long ZSTD_decompressBound | ( | const void * | src, |
size_t | srcSize | ||
) |
ZSTD_decompressBound() : compatible with legacy mode. src must point to the start of a ZSTD frame or a skippable frame. srcSize must be at least as large as the contained frame.
size_t ZSTD_decompressContinue | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
ZSTD_decompressContinue() : srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()). @return : the number of bytes generated into dst (necessarily <= dstCapacity), or an error code, which can be tested using ZSTD_isError().
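A sketch of the buffer-less loop this entry point implies, assuming the whole frame sits in src and dst (bounded by dstEnd, an assumed pointer) is large enough; ZSTD_decompressBegin() and ZSTD_nextSrcSizeToDecompress() are the companion calls:

    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    ZSTD_decompressBegin(dctx);
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t toRead = ZSTD_nextSrcSizeToDecompress(dctx);
    while (toRead != 0) {                 /* 0 means the frame is fully decoded */
        size_t const produced = ZSTD_decompressContinue(dctx, op, (size_t)(dstEnd - op), ip, toRead);
        if (ZSTD_isError(produced)) break;
        ip += toRead;  op += produced;
        toRead = ZSTD_nextSrcSizeToDecompress(dctx);
    }
    ZSTD_freeDCtx(dctx);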
size_t ZSTD_decompressDCtx | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
ZSTD_decompressDCtx() : Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx. Compatible with sticky parameters.
ZSTD_decompressFrame() : @dctx must be properly initialized. Will update *srcPtr and *srcSizePtr, to make *srcPtr progress by one frame.
size_t ZSTD_decompressionMargin | ( | void const * | src, |
size_t | srcSize | ||
) |
FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE ZSTD_decompressSequences_body | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | maxDstSize, | ||
const void * | seqStart, | ||
size_t | seqSize, | ||
int | nbSeq, | ||
const ZSTD_longOffset_e | isLongOffset, | ||
const int | frame | ||
) |
FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE ZSTD_decompressSequences_bodySplitLitBuffer | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | maxDstSize, | ||
const void * | seqStart, | ||
size_t | seqSize, | ||
int | nbSeq, | ||
const ZSTD_longOffset_e | isLongOffset, | ||
const int | frame | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_decompressSequencesLong_body | ( | ZSTD_DCtx * | dctx, |
void * | dst, | ||
size_t | maxDstSize, | ||
const void * | seqStart, | ||
size_t | seqSize, | ||
int | nbSeq, | ||
const ZSTD_longOffset_e | isLongOffset, | ||
const int | frame | ||
) |
size_t ZSTD_decompressStream | ( | ZSTD_DStream * | zds, |
ZSTD_outBuffer * | output, | ||
ZSTD_inBuffer * | input | ||
) |
ZSTD_decompressStream() : Streaming decompression function. Call repetitively to consume full input, updating it as necessary. The function updates both input and output pos fields, exposing current state via these fields:
- if input.pos < input.size, some input remains and the caller should provide the remaining input on the next call.
- if output.pos < output.size, the decoder has finished and flushed all remaining buffers.
- if output.pos == output.size, potentially unflushed data is present in the internal buffers; call ZSTD_decompressStream() again to flush the remaining data to output. Note : with no additional input, the amount of data flushed is <= ZSTD_BLOCKSIZE_MAX.
size_t ZSTD_decompressStream_simpleArgs | ( | ZSTD_DCtx * | dctx,
void * | dst, | ||
size_t | dstCapacity, | ||
size_t * | dstPos, | ||
const void * | src, | ||
size_t | srcSize, | ||
size_t * | srcPos | ||
) |
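A minimal streaming loop for ZSTD_decompressStream() above, assuming the input holds one complete frame; in real code the buffer sizes would come from ZSTD_DStreamInSize()/ZSTD_DStreamOutSize():

    ZSTD_DStream* const zds = ZSTD_createDStream();
    ZSTD_initDStream(zds);
    ZSTD_inBuffer in = { src, srcSize, 0 };
    char outBuf[1 << 17];                 /* illustration only */
    while (in.pos < in.size) {
        ZSTD_outBuffer out = { outBuf, sizeof(outBuf), 0 };
        size_t const ret = ZSTD_decompressStream(zds, &out, &in);
        if (ZSTD_isError(ret)) break;
        /* consume out.pos bytes from outBuf ; ret == 0 signals end of frame,
         * and out.pos == out.size means another call may be needed to flush */
    }
    ZSTD_freeDStream(zds);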
void ZSTD_dedicatedDictSearch_lazy_loadDictionary | ( | ZSTD_matchState_t * | ms, |
const BYTE *const | ip | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_dedicatedDictSearch_lazy_search | ( | size_t * | offsetPtr, |
size_t | ml, | ||
U32 | nbAttempts, | ||
const ZSTD_matchState_t *const | dms, | ||
const BYTE *const | ip, | ||
const BYTE *const | iLimit, | ||
const BYTE *const | prefixStart, | ||
const U32 | curr, | ||
const U32 | dictLimit, | ||
const size_t | ddsIdx | ||
) |
int ZSTD_defaultCLevel | ( | void | ) |
ZSTD_dictAndWindowLog() : Returns an adjusted window log that is large enough to fit the source and the dictionary. The zstd format says that the entire dictionary is valid if one byte of the dictionary is within the window. So the hashLog and chainLog should be large enough to reference both the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing the hashLog and windowLog. NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
ZSTD_dictTooBig(): When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in one go generically. So we ensure that in that case we reset the tables to zero, so that we can load as much of the dictionary as possible.
ZSTD_bounds ZSTD_dParam_getBounds | ( | ZSTD_dParameter | dParam | ) |
ZSTD_dParam_getBounds() : All parameters must belong to an interval with lower and upper bounds, otherwise they will either trigger an error or be automatically clamped.
size_t ZSTD_DStreamInSize | ( | void | ) |
size_t ZSTD_DStreamOutSize | ( | void | ) |
size_t ZSTD_encodeSequences | ( | void * | dst, |
size_t | dstCapacity, | ||
FSE_CTable const * | CTable_MatchLength, | ||
BYTE const * | mlCodeTable, | ||
FSE_CTable const * | CTable_OffsetBits, | ||
BYTE const * | ofCodeTable, | ||
FSE_CTable const * | CTable_LitLength, | ||
BYTE const * | llCodeTable, | ||
seqDef const * | sequences, | ||
size_t | nbSeq, | ||
int | longOffsets, | ||
int | bmi2 | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_encodeSequences_body | ( | void * | dst, |
size_t | dstCapacity, | ||
FSE_CTable const * | CTable_MatchLength, | ||
BYTE const * | mlCodeTable, | ||
FSE_CTable const * | CTable_OffsetBits, | ||
BYTE const * | ofCodeTable, | ||
FSE_CTable const * | CTable_LitLength, | ||
BYTE const * | llCodeTable, | ||
seqDef const * | sequences, | ||
size_t | nbSeq, | ||
int | longOffsets | ||
) |
size_t ZSTD_endStream | ( | ZSTD_CStream * | zcs, |
ZSTD_outBuffer * | output | ||
) |
MEM_STATIC size_t ZSTD_entropyCompressSeqStore | ( | const seqStore_t * | seqStorePtr, |
const ZSTD_entropyCTables_t * | prevEntropy, | ||
ZSTD_entropyCTables_t * | nextEntropy, | ||
const ZSTD_CCtx_params * | cctxParams, | ||
void * | dst, | ||
size_t | dstCapacity, | ||
size_t | srcSize, | ||
void * | entropyWorkspace, | ||
size_t | entropyWkspSize, | ||
int | bmi2 | ||
) |
MEM_STATIC size_t ZSTD_entropyCompressSeqStore_internal | ( | const seqStore_t * | seqStorePtr, |
const ZSTD_entropyCTables_t * | prevEntropy, | ||
ZSTD_entropyCTables_t * | nextEntropy, | ||
const ZSTD_CCtx_params * | cctxParams, | ||
void * | dst, | ||
size_t | dstCapacity, | ||
void * | entropyWorkspace, | ||
size_t | entropyWkspSize, | ||
const int | bmi2 | ||
) |
size_t ZSTD_estimateCCtxSize_usingCCtxParams | ( | const ZSTD_CCtx_params * | params | ) |
size_t ZSTD_estimateCCtxSize_usingCParams | ( | ZSTD_compressionParameters | cParams | ) |
size_t ZSTD_estimateCDictSize | ( | size_t | dictSize, |
int | compressionLevel | ||
) |
size_t ZSTD_estimateCDictSize_advanced | ( | size_t | dictSize, |
ZSTD_compressionParameters | cParams, | ||
ZSTD_dictLoadMethod_e | dictLoadMethod | ||
) |
ZSTD_estimateCDictSize_advanced() : Estimate the amount of memory that will be needed to create a dictionary with the following arguments.
size_t ZSTD_estimateCStreamSize_usingCCtxParams | ( | const ZSTD_CCtx_params * | params | ) |
size_t ZSTD_estimateCStreamSize_usingCParams | ( | ZSTD_compressionParameters | cParams | ) |
size_t ZSTD_estimateDDictSize | ( | size_t | dictSize, |
ZSTD_dictLoadMethod_e | dictLoadMethod | ||
) |
ZSTD_estimateDDictSize() : Estimate the amount of memory that will be needed to create a dictionary for decompression. Note : dictionaries created by reference (using ZSTD_dlm_byRef) are smaller.
size_t ZSTD_estimateDStreamSize_fromFrame | ( | const void * | src, |
size_t | srcSize | ||
) |
void ZSTD_fillDoubleHashTable | ( | ZSTD_matchState_t * | ms, |
void const * | end, | ||
ZSTD_dictTableLoadMethod_e | dtlm, | ||
ZSTD_tableFillPurpose_e | tfp | ||
) |
void ZSTD_fillHashTable | ( | ZSTD_matchState_t * | ms, |
void const * | end, | ||
ZSTD_dictTableLoadMethod_e | dtlm, | ||
ZSTD_tableFillPurpose_e | tfp | ||
) |
unsigned long long ZSTD_findDecompressedSize | ( | const void * | src, |
size_t | srcSize | ||
) |
ZSTD_findDecompressedSize() : srcSize must be the exact length of some number of ZSTD compressed and/or skippable frames. Note : compatible with legacy mode.
size_t ZSTD_findFrameCompressedSize | ( | const void * | src, |
size_t | srcSize | ||
) |
ZSTD_findFrameCompressedSize() : compatible with legacy mode. src must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame. srcSize must be at least as large as the contained frame. @return : the compressed size of the frame starting at src.
size_t ZSTD_flushStream | ( | ZSTD_CStream * | zcs, |
ZSTD_outBuffer * | output | ||
) |
MEM_STATIC U32 ZSTD_fracWeight | ( | U32 | rawStat | ) |
size_t ZSTD_frameHeaderSize | ( | const void * | src, |
size_t | srcSize | ||
) |
ZSTD_frameHeaderSize() : srcSize must be >= ZSTD_frameHeaderSize_prefix.
ZSTD_frameHeaderSize_internal() : srcSize must be large enough to reach header size fields. note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
size_t ZSTD_freeCDict | ( | ZSTD_CDict * | CDict | ) |
ZSTD_freeCDict() : Function frees memory allocated by ZSTD_createCDict(). If a NULL pointer is passed, no operation is performed.
size_t ZSTD_freeCStream | ( | ZSTD_CStream * | zcs | ) |
size_t ZSTD_freeDDict | ( | ZSTD_DDict * | ddict | ) |
ZSTD_freeDDict() : Function frees memory allocated with ZSTD_createDDict(). If a NULL pointer is passed, no operation is performed.
size_t ZSTD_freeDStream | ( | ZSTD_DStream * | zds | ) |
size_t ZSTD_fseBitCost | ( | FSE_CTable const * | ctable, |
unsigned const * | count, | ||
unsigned const | max | ||
) |
size_t ZSTD_generateSequences | ( | ZSTD_CCtx * | zc, |
ZSTD_Sequence * | outSeqs, | ||
size_t | outSeqsSize, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
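A hedged sketch of driving this experimental API; the bound maxNbSeq below is merely an assumed generous estimate, and ZSTD_mergeBlockDelimiters() (declared later in this file) can post-process the result:

    ZSTD_CCtx* const zc = ZSTD_createCCtx();
    size_t const maxNbSeq = srcSize / 3 + 64;               /* assumption, not a guarantee */
    ZSTD_Sequence* const seqs = malloc(maxNbSeq * sizeof(ZSTD_Sequence));
    size_t const nbSeq = ZSTD_generateSequences(zc, seqs, maxNbSeq, src, srcSize);
    if (!ZSTD_isError(nbSeq)) {
        /* optionally strip block delimiters, keeping only literal/match sequences */
        size_t const nbMerged = ZSTD_mergeBlockDelimiters(seqs, nbSeq);
        (void)nbMerged;
    }
    free(seqs);
    ZSTD_freeCCtx(zc);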
size_t ZSTD_getcBlockSize | ( | const void * | src, |
size_t | srcSize, | ||
blockProperties_t * | bpPtr | ||
) |
ZSTD_getcBlockSize() : Provides the size of a compressed block from its block header src.
ZSTD_compressionParameters ZSTD_getCParams | ( | int | compressionLevel, |
unsigned long long | srcSizeHint, | ||
size_t | dictSize | ||
) |
mode controls how we treat the dictSize. See docs for ZSTD_cParamMode_e.
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams | ( | const ZSTD_CCtx_params * | CCtxParams,
U64 | srcSizeHint, | ||
size_t | dictSize, | ||
ZSTD_cParamMode_e | mode | ||
) |
ZSTD_compressionParameters ZSTD_getCParamsFromCDict | ( | const ZSTD_CDict * | cdict | ) |
ZSTD_getCParamsFromCDict() : as the name implies
unsigned long long ZSTD_getDecompressedSize | ( | const void * | src, |
size_t | srcSize | ||
) |
ZSTD_getDecompressedSize() : compatible with legacy mode. @return : decompressed size if known, 0 otherwise (e.g. srcSize too small).
unsigned ZSTD_getDictID_fromCDict | ( | const ZSTD_CDict * | cdict | ) |
ZSTD_getDictID_fromCDict() : Provides the dictID of the dictionary loaded into cdict. If @return == 0, the dictionary is not conventional (without a zstd dictID).
unsigned ZSTD_getDictID_fromDDict | ( | const ZSTD_DDict * | ddict | ) |
ZSTD_getDictID_fromDDict() : Provides the dictID of the dictionary loaded into ddict. If @return == 0, the dictionary is not conventional (without a zstd dictID).
unsigned ZSTD_getDictID_fromDict | ( | const void * | dict, |
size_t | dictSize | ||
) |
ZSTD_getDictID_fromDict() : Provides the dictID stored within dictionary. If @return == 0, the dictionary is not conventional (without a zstd dictID).
unsigned ZSTD_getDictID_fromFrame | ( | const void * | src, |
size_t | srcSize | ||
) |
ZSTD_getDictID_fromFrame() : Provides the dictID required to decompress the frame stored within src. If @return == 0, the dictID could not be decoded. This can happen if the frame does not require a dictionary, or if srcSize is too small, so that the frame header could not be decoded. Note : the latter is possible if srcSize < ZSTD_FRAMEHEADERSIZE_MAX.
ZSTD_ErrorCode ZSTD_getErrorCode | ( | size_t | code | ) |
ZSTD_getErrorCode() : convert a size_t function result into a ZSTD_ErrorCode enum type, which can be used to compare with the enum list published above.
ZSTD_getError() : convert a size_t function result into a proper ZSTD_errorCode enum.
const char* ZSTD_getErrorName | ( | size_t | code | ) |
ZSTD_getErrorName() : provides error code string from function result (useful for debugging)
const char * ZSTD_getErrorString | ( | ZSTD_ErrorCode | code | ) |
ZSTD_getErrorString() : Same as ZSTD_getErrorName(), but using a ZSTD_ErrorCode enum argument; provides the error code string from the enum.
unsigned long long ZSTD_getFrameContentSize | ( | const void * | src, |
size_t | srcSize | ||
) |
ZSTD_getFrameContentSize() : compatible with legacy mode. @return : decompressed size of the frame content in src, if known; ZSTD_CONTENTSIZE_UNKNOWN otherwise; ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. srcSize too small).
size_t ZSTD_getFrameHeader | ( | ZSTD_frameHeader * | zfhPtr,
const void * | src, | ||
size_t | srcSize | ||
) |
ZSTD_getFrameHeader() : decode Frame Header, or require a larger srcSize. Note : this function does not consume input, it only reads it. @return : 0 if zfhPtr is correctly filled; >0 if srcSize is too small, the return value being the wanted srcSize amount; or an error code, which can be tested using ZSTD_isError().
size_t ZSTD_getFrameHeader_advanced | ( | ZSTD_frameHeader * | zfhPtr,
const void * | src, | ||
size_t | srcSize, | ||
ZSTD_format_e | format | ||
) |
ZSTD_getFrameHeader_advanced() : decode Frame Header, or require a larger srcSize. Note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. @return : 0 if zfhPtr is correctly filled; >0 if srcSize is too small, the return value being the wanted srcSize amount; or an error code, which can be tested using ZSTD_isError().
ZSTD_frameProgression ZSTD_getFrameProgression | ( | const ZSTD_CCtx * | cctx | ) |
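Returning to ZSTD_getFrameHeader() above, a minimal sketch of its three outcomes (buf/have are assumed to track the bytes read so far):

    ZSTD_frameHeader zfh;
    size_t const ret = ZSTD_getFrameHeader(&zfh, buf, have);
    if (ZSTD_isError(ret)) {
        /* not a valid frame header */
    } else if (ret > 0) {
        /* need at least ret bytes in total : read more input, then retry */
    } else {
        /* zfh is filled : zfh.frameContentSize, zfh.windowSize, zfh.dictID, ... */
    }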
MEM_STATIC U32 ZSTD_getLowestMatchIndex | ( | const ZSTD_matchState_t * | ms, |
U32 | curr, | ||
unsigned | windowLog | ||
) |
MEM_STATIC U32 ZSTD_getLowestPrefixIndex | ( | const ZSTD_matchState_t * | ms, |
U32 | curr, | ||
unsigned | windowLog | ||
) |
FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice | ( | U32 const | offBase, |
U32 const | matchLength, | ||
const optState_t *const | optPtr, | ||
int const | optLevel | ||
) |
ZSTD_parameters ZSTD_getParams | ( | int | compressionLevel, |
unsigned long long | srcSizeHint, | ||
size_t | dictSize | ||
) |
ZSTD_getParams() : same idea as ZSTD_getCParams(), but @return a ZSTD_parameters structure (instead of ZSTD_compressionParameters). Fields of ZSTD_frameParameters are set to default values.
const seqStore_t * ZSTD_getSeqStore | ( | const ZSTD_CCtx * | ctx | ) |
MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength | ( | seqStore_t const * | seqStore, |
seqDef const * | seq | ||
) |
Returns the ZSTD_sequenceLength for the given sequence. It handles the decoding of long sequences indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
MEM_STATIC size_t ZSTD_hash3Ptr | ( | const void * | ptr, |
U32 | h | ||
) |
MEM_STATIC size_t ZSTD_hash3PtrS | ( | const void * | ptr, |
U32 | h, | ||
U32 | s | ||
) |
MEM_STATIC FORCE_INLINE_ATTR size_t ZSTD_hashPtr | ( | const void * | p, |
U32 | hBits, | ||
U32 | mls | ||
) |
MEM_STATIC FORCE_INLINE_ATTR size_t ZSTD_hashPtrSalted | ( | const void * | p, |
U32 | hBits, | ||
U32 | mls, | ||
const U64 | hashSalt | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch | ( | ZSTD_matchState_t * | ms, |
const BYTE *const | ip, | ||
const BYTE *const | iLimit, | ||
size_t * | offsetPtr, | ||
const U32 | mls, | ||
const ZSTD_dictMode_e | dictMode | ||
) |
MEM_STATIC unsigned ZSTD_highbit32 | ( | U32 | val | ) |
size_t ZSTD_initCStream | ( | ZSTD_CStream * | zcs, |
int | compressionLevel | ||
) |
Equivalent to:
    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
    ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API to compress with a dictionary.
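A sketch of that recommended new-API path, here including a dictionary (dictBuf/dictSize assumed; level 5 is arbitrary; dstCapacity is assumed >= ZSTD_compressBound(srcSize) so the single output buffer cannot fill up):

    ZSTD_CCtx* const zcs = ZSTD_createCCtx();
    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, 5);
    ZSTD_CCtx_loadDictionary(zcs, dictBuf, dictSize);   /* replaces ZSTD_initCStream_usingDict() */
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t remaining;
    do {
        remaining = ZSTD_compressStream2(zcs, &out, &in, ZSTD_e_end);
    } while (remaining != 0 && !ZSTD_isError(remaining));
    ZSTD_freeCCtx(zcs);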
size_t ZSTD_initCStream_advanced | ( | ZSTD_CStream * | zcs, |
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_parameters | params, | ||
unsigned long long | pss | ||
) |
size_t ZSTD_initCStream_internal | ( | ZSTD_CStream * | zcs, |
const void * | dict, | ||
size_t | dictSize, | ||
const ZSTD_CDict * | cdict, | ||
const ZSTD_CCtx_params * | params, | ||
unsigned long long | pledgedSrcSize | ||
) |
ZSTD_initCStream_internal() : Private use only (for lib/compress; used by zstdmt_compress.c). Init streaming operation. Expects params to be valid. Must receive dict, or cdict, or none, but not both.
size_t ZSTD_initCStream_srcSize | ( | ZSTD_CStream * | zcs, |
int | compressionLevel, | ||
unsigned long long | pss | ||
) |
size_t ZSTD_initCStream_usingCDict | ( | ZSTD_CStream * | zcs, |
const ZSTD_CDict * | cdict | ||
) |
size_t ZSTD_initCStream_usingCDict_advanced | ( | ZSTD_CStream * | zcs, |
const ZSTD_CDict * | cdict, | ||
ZSTD_frameParameters | fParams, | ||
unsigned long long | pledgedSrcSize | ||
) |
size_t ZSTD_initCStream_usingDict | ( | ZSTD_CStream * | zcs, |
const void * | dict, | ||
size_t | dictSize, | ||
int | compressionLevel | ||
) |
size_t ZSTD_initDStream | ( | ZSTD_DStream * | zds | ) |
ZSTD_initDStream() : Initialize/reset DStream state for new decompression operation. Call before new decompression operation using same DStream.
Note : This function is redundant with the advanced API and equivalent to:
    ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
    ZSTD_DCtx_refDDict(zds, NULL);
size_t ZSTD_initDStream_usingDDict | ( | ZSTD_DStream * | dctx, |
const ZSTD_DDict * | ddict | ||
) |
size_t ZSTD_initDStream_usingDict | ( | ZSTD_DStream * | zds, |
const void * | dict, | ||
size_t | dictSize | ||
) |
ZSTD_CCtx* ZSTD_initStaticCCtx | ( | void * | workspace, |
size_t | workspaceSize | ||
) |
const ZSTD_CDict* ZSTD_initStaticCDict | ( | void * | workspace, |
size_t | workspaceSize, | ||
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_dictLoadMethod_e | dictLoadMethod, | ||
ZSTD_dictContentType_e | dictContentType, | ||
ZSTD_compressionParameters | cParams | ||
) |
ZSTD_initStaticCDict_advanced() : Generate a digested dictionary in the provided memory area. workspace : the memory area in which to emplace the dictionary. The provided pointer must be 8-byte aligned and must outlive dictionary usage. workspaceSize : use ZSTD_estimateCDictSize() to determine how large the workspace must be. cParams : use ZSTD_getCParams() to transform a compression level into its relevant cParams.
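A sketch of sizing and placing a static CDict, per the notes above (the compression level is an arbitrary example; malloc's alignment satisfies the 8-byte requirement):

    int const level = 3;
    size_t const wkspSize = ZSTD_estimateCDictSize(dictSize, level);
    void* const wksp = malloc(wkspSize);
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(level, 0, dictSize);
    const ZSTD_CDict* const cdict = ZSTD_initStaticCDict(wksp, wkspSize,
            dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
    if (cdict == NULL) { /* workspace too small */ }
    /* use cdict... ; free(wksp) only after the last use: a static CDict is not
     * released through ZSTD_freeCDict() */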
ZSTD_CStream* ZSTD_initStaticCStream | ( | void * | workspace, |
size_t | workspaceSize | ||
) |
ZSTD_DCtx* ZSTD_initStaticDCtx | ( | void * | workspace, |
size_t | workspaceSize | ||
) |
const ZSTD_DDict* ZSTD_initStaticDDict | ( | void * | sBuffer, |
size_t | sBufferSize, | ||
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_dictLoadMethod_e | dictLoadMethod, | ||
ZSTD_dictContentType_e | dictContentType | ||
) |
ZSTD_DStream* ZSTD_initStaticDStream | ( | void * | workspace, |
size_t | workspaceSize | ||
) |
U32 ZSTD_insertAndFindFirstIndex | ( | ZSTD_matchState_t * | ms, |
const BYTE * | ip | ||
) |
FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal | ( | ZSTD_matchState_t * | ms, |
const ZSTD_compressionParameters *const | cParams, | ||
const BYTE * | ip, | ||
U32 const | mls, | ||
U32 const | lazySkipping | ||
) |
size_t ZSTD_insertBlock | ( | ZSTD_DCtx * | dctx, |
const void * | blockStart, | ||
size_t | blockSize | ||
) |
ZSTD_insertBlock() : insert src block into dctx history. Useful to track uncompressed blocks.
ZSTD_insertBt1() : add one or multiple positions to tree.
ip | assumed <= iend-8 . |
target | The target of ZSTD_updateTree_internal() - we are filling to this position |
FORCE_INLINE_TEMPLATE U32 ZSTD_insertBtAndGetAllMatches | ( | ZSTD_match_t * | matches, |
ZSTD_matchState_t * | ms, | ||
U32 * | nextToUpdate3, | ||
const BYTE *const | ip, | ||
const BYTE *const | iLimit, | ||
const ZSTD_dictMode_e | dictMode, | ||
const U32 | rep[ZSTD_REP_NUM], | ||
const U32 | ll0, | ||
const U32 | lengthToBeat, | ||
const U32 | mls | ||
) |
ZSTD_insertDUBT1() : sort one already-inserted but unsorted position. Assumption : curr >= btlow == (curr - btmask). Doesn't fail.
ZSTD_invalidateMatchState() Invalidate all the matches in the match finder tables. Requires nextSrc and base to be set (can be NULL).
ZSTD_ipow() : Return base^exponent.
MEM_STATIC int ZSTD_isAligned | ( | void const * | ptr, |
size_t | align | ||
) |
unsigned ZSTD_isError | ( | size_t | code | ) |
ZSTD_isError() : tells if a return value is an error code. Symbol is required for external callers.
unsigned ZSTD_isFrame | ( | const void * | buffer, |
size_t | size | ||
) |
ZSTD_isFrame() : Tells if the content of buffer starts with a valid Frame Identifier. Note : Frame Identifier is 4 bytes. If size < 4, @return is always 0.
unsigned ZSTD_isSkippableFrame | ( | const void * | buffer, |
size_t | size | ||
) |
ZSTD_isSkippableFrame() : Tells if the content of buffer starts with a valid Frame Identifier for a skippable frame. Note : Frame Identifier is 4 bytes. If size < 4, @return is always 0.
void ZSTD_ldm_adjustParameters | ( | ldmParams_t * | params, |
ZSTD_compressionParameters const * | cParams | ||
) |
ZSTD_ldm_adjustParameters() : If the params->hashRateLog is not set, set it to its default value based on windowLog and params->hashLog.
Ensures that params->bucketSizeLog is <= params->hashLog (setting it to params->hashLog if it is not).
Ensures that the minMatchLength >= targetLength during optimal parsing.
size_t ZSTD_ldm_blockCompress | ( | rawSeqStore_t * | rawSeqStore, |
ZSTD_matchState_t * | ms, | ||
seqStore_t * | seqStore, | ||
U32 | rep[ZSTD_REP_NUM], | ||
ZSTD_paramSwitch_e | useRowMatchFinder, | ||
void const * | src, | ||
size_t | srcSize | ||
) |
Compresses a block using the predefined sequences, along with a secondary block compressor. The literals section of every sequence is passed to the secondary block compressor, and those sequences are interspersed with the predefined sequences. Returns the length of the last literals. Updates rawSeqStore.pos
to indicate how many sequences have been consumed. rawSeqStore.seq
may also be updated to split the last sequence between two blocks.
NOTE: The source must be at most the maximum block size, but the predefined sequences can be any size, and may be longer than the block. In the case that they are longer than the block, the last sequences may need to be split into two. We handle that case correctly, and update rawSeqStore
appropriately. NOTE: This function does not return any errors.
ZSTD_ldm_countBackwardsMatch() : Returns the number of bytes that match backwards before pIn and pMatch.
We count only bytes where pMatch >= pBase and pIn >= pAnchor.
ZSTD_ldm_countBackwardsMatch_2segments() : Returns the number of bytes that match backwards from pMatch, even with the backwards match spanning 2 different segments.
On reaching pMatchBase
, start counting from mEnd
Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. This is similar to ZSTD_loadDictionaryContent.
The tables for the other strategies are filled within their block compressors.
void ZSTD_ldm_fillHashTable | ( | ldmState_t * | state, |
const BYTE * | ip, | ||
const BYTE * | iend, | ||
ldmParams_t const * | params | ||
) |
Registers in the splits array all the split points found in the first size bytes following the data pointer. This function terminates when either all the data has been processed or LDM_BATCH_SIZE splits are present in the splits array.
Precondition: The splits array must not be full. Returns: The number of bytes processed.
Initializes the rolling hash state such that it will honor the settings in params.
ZSTD_ldm_gear_reset() Feeds [data, data + minMatchLength) into the hash without registering any splits. This effectively resets the hash state. This is used when skipping over data, either at the beginning of a block, or skipping sections.
size_t ZSTD_ldm_generateSequences | ( | ldmState_t * | ldms, |
rawSeqStore_t * | sequences, | ||
ldmParams_t const * | params, | ||
void const * | src, | ||
size_t | srcSize | ||
) |
Generates the sequences using the long distance match finder. Generates long range matching sequences in sequences
, which parse a prefix of the source. sequences
must be large enough to store every sequence, which can be checked with ZSTD_ldm_getMaxNbSeq()
.
NOTE: The user must have called ZSTD_window_update() for all of the input they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. NOTE: This function returns an error if it runs out of space to store sequences.
ZSTD_ldm_getBucket() : Returns a pointer to the start of the bucket associated with hash.
size_t ZSTD_ldm_getMaxNbSeq | ( | ldmParams_t | params, |
size_t | maxChunkSize | ||
) |
size_t ZSTD_ldm_getTableSize | ( | ldmParams_t | params | ) |
ZSTD_ldm_getTableSize() : Estimate the space needed for long distance matching tables or 0 if LDM is disabled.
ZSTD_ldm_insertEntry() : Insert the entry with corresponding hash into the hash table
Sets cctx->nextToUpdate to a position corresponding closer to anchor if it is far away (after a long match, only update tables a limited amount).
ZSTD_ldm_reduceTable() : reduce table indexes by reducerValue
void ZSTD_ldm_skipRawSeqStoreBytes | ( | rawSeqStore_t * | rawSeqStore, |
size_t | nbBytes | ||
) |
void ZSTD_ldm_skipSequences | ( | rawSeqStore_t * | rawSeqStore, |
size_t | srcSize, | ||
U32 const | minMatch | ||
) |
Skip past srcSize
bytes worth of sequences in rawSeqStore
. Avoids emitting matches less than minMatch
bytes. Must be called for data that is not passed to ZSTD_ldm_blockCompress().
MEM_STATIC size_t ZSTD_limitCopy | ( | void * | dst, |
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
MEM_STATIC int ZSTD_literalsCompressionIsDisabled | ( | const ZSTD_CCtx_params * | cctxParams | ) |
MEM_STATIC U32 ZSTD_LLcode | ( | U32 | litLength | ) |
size_t ZSTD_loadCEntropy | ( | ZSTD_compressedBlockState_t * | bs, |
void * | workspace, | ||
const void *const | dict, | ||
size_t | dictSize | ||
) |
size_t ZSTD_loadDEntropy | ( | ZSTD_entropyDTables_t * | entropy, |
const void *const | dict, | ||
size_t const | dictSize | ||
) |
ZSTD_loadDEntropy() : dict : must point at beginning of a valid zstd dictionary.
ZSTD_loadDictionaryContent() :
MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode | ( | const ZSTD_matchState_t * | ms | ) |
ZSTD_matchState_dictMode(): Inspects the provided matchState and figures out what dictMode should be passed to the compressor.
int ZSTD_maxCLevel | ( | void | ) |
size_t ZSTD_mergeBlockDelimiters | ( | ZSTD_Sequence * | sequences, |
size_t | seqsSize | ||
) |
int ZSTD_minCLevel | ( | void | ) |
MEM_STATIC size_t ZSTD_minGain | ( | size_t | srcSize, |
ZSTD_strategy | strat | ||
) |
MEM_STATIC U32 ZSTD_MLcode | ( | U32 | mlBase | ) |
MEM_STATIC unsigned ZSTD_NbCommonBytes | ( | size_t | val | ) |
MEM_STATIC repcodes_t ZSTD_newRep | ( | U32 const | rep[ZSTD_REP_NUM], |
U32 const | offBase, | ||
U32 const | ll0 | ||
) |
ZSTD_nextInputType_e ZSTD_nextInputType | ( | ZSTD_DCtx * | dctx | ) |
Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we allow taking a partial block as the input. Currently only raw uncompressed blocks can be streamed.
For blocks that can be streamed, this allows us to reduce the latency until we produce output, and avoid copying the input.
inputSize | - The total amount of input that the caller currently has. |
MEM_STATIC size_t ZSTD_noCompressBlock | ( | void * | dst, |
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
U32 | lastBlock | ||
) |
size_t ZSTD_noCompressLiterals | ( | void * | dst, |
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
HINT_INLINE void ZSTD_overlapCopy8 | ( | BYTE ** | op, |
BYTE const ** | ip, | ||
size_t | offset | ||
) |
ZSTD_overlapCopy8() : Copies 8 bytes from ip to op and updates op and ip where ip <= op. If the offset is < 8 then the offset is spread to at least 8 bytes.
Precondition: *ip <= *op. Postcondition: *op - *ip >= 8.
FORCE_INLINE_TEMPLATE size_t ZSTD_prefetchMatch | ( | size_t | prefetchPos, |
seq_t const | sequence, | ||
const BYTE *const | prefixStart, | ||
const BYTE *const | dictEnd | ||
) |
MEM_STATIC U32 ZSTD_readMINMATCH | ( | const void * | memPtr, |
U32 | length | ||
) |
size_t ZSTD_readSkippableFrame | ( | void * | dst, |
size_t | dstCapacity, | ||
unsigned * | magicVariant, | ||
const void * | src, | ||
size_t | srcSize | ||
) |
ZSTD_readSkippableFrame() : Retrieves content of a skippable frame, and writes it to dst buffer.
The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested in the magicVariant.
Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame.
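A round-trip sketch pairing this with ZSTD_writeSkippableFrame(), declared later in this file (payload/payloadSize are assumed inputs; the magicVariant 7 and the buffer sizes are arbitrary; error checks via ZSTD_isError() are omitted for brevity):

    char frame[256];
    char payloadOut[256];
    unsigned variant;
    size_t const fSize = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                  payload, payloadSize, 7);
    size_t const pSize = ZSTD_readSkippableFrame(payloadOut, sizeof(payloadOut),
                                                 &variant, frame, fSize);
    /* on success : pSize == payloadSize and variant == 7 */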
ZSTD_reduceIndex() : rescale all indexes to avoid future overflow (indexes are U32)
FORCE_INLINE_TEMPLATE void ZSTD_reduceTable_internal | ( | U32 *const | table, |
U32 const | size, | ||
U32 const | reducerValue, | ||
int const | preserveMark | ||
) |
ZSTD_reduceTable() : reduce table indexes by reducerValue
, or squash to zero. PreserveMark preserves "unsorted mark" for btlazy2 strategy. It must be set to a clear 0/1 value, to remove branch during inlining. Presume table size is a multiple of ZSTD_ROWSIZE to help auto-vectorization
void ZSTD_registerSequenceProducer | ( | ZSTD_CCtx * | zc, |
void * | mState, | ||
ZSTD_sequenceProducer_F * | mFinder | ||
) |
void ZSTD_reset_compressedBlockState | ( | ZSTD_compressedBlockState_t * | bs | ) |
loadedDictSize | The size of the dictionary to be loaded into the context, if any. If no dictionary is used, or the dictionary is being attached / copied, then pass 0. note : params are assumed fully validated at this stage. |
size_t ZSTD_resetCStream | ( | ZSTD_CStream * | zcs, |
unsigned long long | pss | ||
) |
size_t ZSTD_resetDStream | ( | ZSTD_DStream * | dctx | ) |
void ZSTD_resetSeqStore | ( | seqStore_t * | ssPtr | ) |
Returns the raw offset represented by the combination of offBase, ll0, and repcode history. offBase must represent a repcode in the numeric representation of ZSTD_storeSeq().
MEM_STATIC size_t ZSTD_rleCompressBlock | ( | void * | dst, |
size_t | dstCapacity, | ||
BYTE | src, | ||
size_t | srcSize, | ||
U32 | lastBlock | ||
) |
ZSTD_rollingHash_append() : Add the buffer to the hash value.
MEM_STATIC U64 ZSTD_rollingHash_compute | ( | void const * | buf, |
size_t | size | ||
) |
ZSTD_rollingHash_compute() : Compute the rolling hash value of the buffer.
MEM_STATIC U64 ZSTD_rollingHash_primePower | ( | U32 | length | ) |
ZSTD_rollingHash_primePower() : Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash over a window of length bytes.
ZSTD_rollingHash_rotate() : Rotate the rolling hash by one byte.
MEM_STATIC U16 ZSTD_rotateRight_U16 | ( | U16 const | value, |
U32 | count | ||
) |
MEM_STATIC U32 ZSTD_rotateRight_U32 | ( | U32 const | value, |
U32 | count | ||
) |
MEM_STATIC U64 ZSTD_rotateRight_U64 | ( | U64 const | value, |
U32 | count | ||
) |
FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache | ( | ZSTD_matchState_t * | ms, |
const BYTE * | base, | ||
U32 const | rowLog, | ||
U32 const | mls, | ||
U32 | idx, | ||
const BYTE *const | iLimit | ||
) |
FORCE_INLINE_TEMPLATE ZSTD_VecMask ZSTD_row_getMatchMask | ( | const BYTE *const | tagRow, |
const BYTE | tag, | ||
const U32 | headGrouped, | ||
const U32 | rowEntries | ||
) |
FORCE_INLINE_TEMPLATE U32 ZSTD_row_matchMaskGroupWidth | ( | const U32 | rowEntries | ) |
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex | ( | BYTE *const | tagRow, |
U32 const | rowMask | ||
) |
FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch | ( | U32 const * | hashTable, |
BYTE const * | tagTable, | ||
U32 const | relRow, | ||
U32 const | rowLog | ||
) |
void ZSTD_row_update | ( | ZSTD_matchState_t *const | ms, |
const BYTE * | ip | ||
) |
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal | ( | ZSTD_matchState_t * | ms, |
const BYTE * | ip, | ||
U32 const | mls, | ||
U32 const | rowLog, | ||
U32 const | rowMask, | ||
U32 const | useCache | ||
) |
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl | ( | ZSTD_matchState_t * | ms, |
U32 | updateStartIdx, | ||
U32 const | updateEndIdx, | ||
U32 const | mls, | ||
U32 const | rowLog, | ||
U32 const | rowMask, | ||
U32 const | useCache | ||
) |
FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch | ( | ZSTD_matchState_t * | ms, |
const BYTE *const | ip, | ||
const BYTE *const | iLimit, | ||
size_t * | offsetPtr, | ||
const U32 | mls, | ||
const ZSTD_dictMode_e | dictMode, | ||
const U32 | rowLog | ||
) |
ZSTD_safecopy() : Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer and write up to 16 bytes past oend_w (op >= oend_w is allowed). This function is only called in the uncommon case where the sequence is near the end of the block. It should be fast for a single long sequence, but can be slow for several short sequences.
ovtype | controls the overlap detection (ZSTD_no_overlap vs ZSTD_overlap_src_before_dst) |
ZSTD_safecopyLiterals() : memcpy() variant that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w. Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single large copies.
FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax | ( | ZSTD_matchState_t * | ms, |
const BYTE * | ip, | ||
const BYTE * | iend, | ||
size_t * | offsetPtr, | ||
U32 const | mls, | ||
U32 const | rowLog, | ||
searchMethod_e const | searchMethod, | ||
ZSTD_dictMode_e const | dictMode | ||
) |
Searches for the longest match at ip
. Dispatches to the correct implementation function based on the (searchMethod, dictMode, mls, rowLog). We use switch statements here instead of using an indirect function call through a function pointer because after Spectre and Meltdown mitigations, indirect function calls can be very costly, especially in the kernel.
NOTE: dictMode and searchMethod should be templated, so those switch statements should be optimized out. Only the mls & rowLog switches should be left.
ms | The match state. | |
ip | The position to search at. | |
iend | The end of the input data. | |
[out] | offsetPtr | Stores the match offset into this pointer. |
mls | The minimum search length, in the range [4, 6]. | |
rowLog | The row log (if applicable), in the range [4, 6]. | |
searchMethod | The search method to use (templated). | |
dictMode | The dictMode (templated). |
@return : the length of the longest match found, with its offset stored into offsetPtr.
ZSTD_blockCompressor ZSTD_selectBlockCompressor | ( | ZSTD_strategy | strat,
ZSTD_paramSwitch_e | rowMatchfinderMode, | ||
ZSTD_dictMode_e | dictMode | ||
) |
symbolEncodingType_e ZSTD_selectEncodingType | ( | FSE_repeat * | repeatMode, |
unsigned const * | count, | ||
unsigned const | max, | ||
size_t const | mostFrequent, | ||
size_t | nbSeq, | ||
unsigned const | FSELog, | ||
FSE_CTable const * | prevCTable, | ||
short const * | defaultNorm, | ||
U32 | defaultNormLog, | ||
ZSTD_defaultPolicy_e const | isDefaultAllowed, | ||
ZSTD_strategy const | strategy | ||
) |
ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise due to emission of RLE/raw blocks that disturb the offset history, and replaces any repcodes within the seqStore that may be invalid.
dRepcodes are updated as would be on the decompression side. cRepcodes are updated exactly in accordance with the seqStore.
Note : this function assumes seq->offBase respects the following numbering scheme : 0 : invalid ; 1-3 : repcodes 1-3 ; 4+ : real_offset + 3.
int ZSTD_seqToCodes | ( | const seqStore_t * | seqStorePtr | ) |
size_t ZSTD_sizeof_CCtx | ( | const ZSTD_CCtx * | cctx | ) |
size_t ZSTD_sizeof_CDict | ( | const ZSTD_CDict * | cdict | ) |
size_t ZSTD_sizeof_CStream | ( | const ZSTD_CStream * | zcs | ) |
size_t ZSTD_sizeof_DDict | ( | const ZSTD_DDict * | ddict | ) |
size_t ZSTD_sizeof_DStream | ( | const ZSTD_DStream * | dctx | ) |
HINT_INLINE UNUSED_ATTR void ZSTD_storeSeq | ( | seqStore_t * | seqStorePtr, |
size_t | litLength, | ||
const BYTE * | literals, | ||
const BYTE * | litLimit, | ||
U32 | offBase, | ||
size_t | matchLength | ||
) |
ZSTD_storeSeq() : Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t. @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). @matchLength : must be >= MINMATCH Allowed to over-read literals up to litLimit.
size_t ZSTD_toFlushNow | ( | ZSTD_CCtx * | cctx | ) |
ZSTD_toFlushNow() Only useful for multithreading scenarios currently (nbWorkers >= 1).
FORCE_INLINE_TEMPLATE void ZSTD_updateFseStateWithDInfo | ( | ZSTD_fseState * | DStatePtr, |
BIT_DStream_t * | bitD, | ||
U16 | nextState, | ||
U32 | nbBits | ||
) |
MEM_STATIC void ZSTD_updateRep | ( | U32 | rep[ZSTD_REP_NUM], |
U32 const | offBase, | ||
U32 const | ll0 | ||
) |
void ZSTD_updateTree | ( | ZSTD_matchState_t * | ms, |
const BYTE * | ip, | ||
const BYTE * | iend | ||
) |
FORCE_INLINE_TEMPLATE void ZSTD_updateTree_internal | ( | ZSTD_matchState_t * | ms, |
const BYTE *const | ip, | ||
const BYTE *const | iend, | ||
const U32 | mls, | ||
const ZSTD_dictMode_e | dictMode | ||
) |
MEM_STATIC U32 ZSTD_VecMask_next | ( | ZSTD_VecMask | val | ) |
unsigned ZSTD_versionNumber | ( | void | ) |
ZSTD_versionNumber() : Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE).
const char* ZSTD_versionString | ( | void | ) |
ZSTD_versionString() : Return runtime library version, like "1.4.5". Requires v1.3.0+.
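A small sketch decoding the numeric form (10405, for instance, would correspond to v1.4.5; assumes <stdio.h>):

    unsigned const v = ZSTD_versionNumber();
    unsigned const major   = v / 10000;        /* MAJOR * 100 * 100 */
    unsigned const minor   = (v / 100) % 100;
    unsigned const release = v % 100;
    printf("zstd %u.%u.%u (%s)\n", major, minor, release, ZSTD_versionString());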
MEM_STATIC FORCE_INLINE_ATTR void ZSTD_wildcopy | ( | void * | dst, |
const void * | src, | ||
ptrdiff_t | length, | ||
ZSTD_overlap_e const | ovtype | ||
) |
ZSTD_wildcopy() : Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
ovtype | controls the overlap detection (ZSTD_no_overlap vs ZSTD_overlap_src_before_dst) |
MEM_STATIC U32 ZSTD_window_canOverflowCorrect | ( | ZSTD_window_t const | window, |
U32 | cycleLog, | ||
U32 | maxDist, | ||
U32 | loadedDictEnd, | ||
void const * | src | ||
) |
ZSTD_window_canOverflowCorrect(): Returns non-zero if the indices are large enough for overflow correction to work correctly without impacting compression ratio.
MEM_STATIC void ZSTD_window_clear | ( | ZSTD_window_t * | window | ) |
ZSTD_window_clear(): Clears the window containing the history by simply setting it to empty.
MEM_STATIC U32 ZSTD_window_correctOverflow | ( | ZSTD_window_t * | window, |
U32 | cycleLog, | ||
U32 | maxDist, | ||
void const * | src | ||
) |
ZSTD_window_correctOverflow(): Reduces the indices to protect from index overflow. Returns the correction made to the indices, which must be applied to every stored index.
The least significant cycleLog bits of the indices must remain the same, which may be 0. Every index up to maxDist in the past must be valid.
MEM_STATIC void ZSTD_window_enforceMaxDist | ( | ZSTD_window_t * | window, |
const void * | blockEnd, | ||
U32 | maxDist, | ||
U32 * | loadedDictEndPtr, | ||
const ZSTD_matchState_t ** | dictMatchStatePtr | ||
) |
ZSTD_window_enforceMaxDist(): Updates lowLimit so that: (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
It ensures index is valid as long as index >= lowLimit. This must be called before a block compression call.
loadedDictEnd is only defined if a dictionary is in use for current compression. As the name implies, loadedDictEnd represents the index at end of dictionary. The value lies within context's referential, it can be directly compared to blockEndIdx.
If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0. If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit. This is because dictionaries are allowed to be referenced fully as long as the last byte of the dictionary is in the window. Once input has progressed beyond window size, dictionary cannot be referenced anymore.
In normal dict mode, the dictionary lies between lowLimit and dictLimit. In dictMatchState mode, lowLimit and dictLimit are the same, and the dictionary is below them. forceWindow and dictMatchState are therefore incompatible.
MEM_STATIC U32 ZSTD_window_hasExtDict | ( | ZSTD_window_t const | window | ) |
ZSTD_window_hasExtDict(): Returns non-zero if the window has a non-empty extDict.
MEM_STATIC void ZSTD_window_init | ( | ZSTD_window_t * | window | ) |
MEM_STATIC U32 ZSTD_window_isEmpty | ( | ZSTD_window_t const | window | ) |
MEM_STATIC U32 ZSTD_window_needOverflowCorrection | ( | ZSTD_window_t const | window, |
U32 | cycleLog, | ||
U32 | maxDist, | ||
U32 | loadedDictEnd, | ||
void const * | src, | ||
void const * | srcEnd | ||
) |
ZSTD_window_needOverflowCorrection(): Returns non-zero if the indices are getting too large and need overflow protection.
MEM_STATIC U32 ZSTD_window_update | ( | ZSTD_window_t * | window, |
void const * | src, | ||
size_t | srcSize, | ||
int | forceNonContiguous | ||
) |
ZSTD_window_update(): Updates the window by appending [src, src + srcSize) to the window. If it is not contiguous, the current prefix becomes the extDict, and we forget about the extDict. Handles overlap of the prefix and extDict. Returns non-zero if the segment is contiguous.
ZSTD_writeEpilogue() : Ends a frame.
size_t ZSTD_writeLastEmptyBlock | ( | void * | dst, |
size_t | dstCapacity | ||
) |
size_t ZSTD_writeSkippableFrame | ( | void * | dst, |
size_t | dstCapacity, | ||
const void * | src, | ||
size_t | srcSize, | ||
unsigned | magicVariant | ||
) |
MEM_STATIC void ZSTD_writeTaggedIndex | ( | U32 *const | hashTable, |
size_t | hashAndTag, | ||
U32 | index | ||
) |
size_t ZSTDMT_compressStream_generic | ( | ZSTDMT_CCtx * | mtctx, |
ZSTD_outBuffer * | output, | ||
ZSTD_inBuffer * | input, | ||
ZSTD_EndDirective | endOp | ||
) |
ZSTDMT_compressStream_generic() : Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream() depending on flush directive.
ZSTDMT_compressStream_generic() : internal use only - exposed to be invoked from zstd_compress.c assumption : output and input are valid (pos <= size)
ZSTDMT_CCtx * ZSTDMT_createCCtx_advanced | ( | unsigned | nbWorkers, |
ZSTD_customMem | cMem, | ||
ZSTD_threadPool * | pool | ||
) |
MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal | ( | unsigned | nbWorkers, |
ZSTD_customMem | cMem, | ||
ZSTD_threadPool * | pool | ||
) |
ZSTDMT_flushProduced() : flush whatever data has been produced but not yet flushed in the current job. Move to the next job if the current one is fully flushed. output : pos will be updated with the amount of data flushed. blockToFlush : if >0, the function will block and wait if there is no data available to flush.
size_t ZSTDMT_freeCCtx | ( | ZSTDMT_CCtx * | mtctx | ) |
ZSTDMT_getBuffer() : assumption : bufPool must be valid
ZSTD_frameProgression ZSTDMT_getFrameProgression | ( | ZSTDMT_CCtx * | mtctx | ) |
ZSTDMT_getFrameProgression(): tells how much data has been consumed (input) and produced (output) for the current frame. Able to count progression inside worker threads.
size_t ZSTDMT_initCStream_internal | ( | ZSTDMT_CCtx * | mtctx, |
const void * | dict, | ||
size_t | dictSize, | ||
ZSTD_dictContentType_e | dictContentType, | ||
const ZSTD_CDict * | cdict, | ||
ZSTD_CCtx_params | params, | ||
unsigned long long | pledgedSrcSize | ||
) |
ZSTDMT_initCStream_internal() : Private use only. Init streaming operation. expects params to be valid. must receive dict, or cdict, or none, but not both. mtctx can be freshly constructed or reused from a prior compression. If mtctx is reused, memory allocations from the prior compression may not be freed, even if they are not needed for the current compression.
size_t ZSTDMT_nextInputSizeHint | ( | const ZSTDMT_CCtx * | mtctx | ) |
size_t ZSTDMT_sizeof_CCtx | ( | ZSTDMT_CCtx * | mtctx | ) |
size_t ZSTDMT_toFlushNow | ( | ZSTDMT_CCtx * | mtctx | ) |
ZSTDMT_toFlushNow() : Tell how many bytes are ready to be flushed immediately. Probe the oldest active job (not yet entirely flushed) and check its output buffer. If it returns 0, either there is no active job, or the oldest job is still active but everything it has produced has been flushed so far; flushing is therefore limited by the speed of the oldest job.
void ZSTDMT_updateCParams_whileCompressing | ( | ZSTDMT_CCtx * | mtctx, |
const ZSTD_CCtx_params * | cctxParams | ||
) |
ZSTDMT_updateCParams_whileCompressing() : Updates only a selected set of compression parameters, to remain compatible with the currently active frame. New parameters will be applied to the next compression job.
int g_debuglevel = DEBUGLEVEL |
-log2(x / 256) lookup table for x in [0, 256). If x == 0: Return 0 Else: Return floor(-log2(x / 256) * 256)