diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index f2eab6807d7..00000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,124 +0,0 @@ -version: 2 - -jobs: - # the first half of the jobs are in this test - short-tests-0: - # TODO: Create a small custom docker image with all the dependencies we need - # preinstalled to reduce installation time. - docker: - - image: fbopensource/zstd-circleci-primary:0.0.1 - # TODO: Re-enable aarch64 build: - # make aarch64build && make clean - steps: - - checkout - - run: - name: Test - command: | - ./tests/test-license.py - cc -v - CFLAGS="-O0 -Werror -pedantic" make allmost; make clean - make c99build; make clean - make c11build; make clean - make -j regressiontest; make clean - make shortest; make clean - make cxxtest; make clean - # the second half of the jobs are in this test - short-tests-1: - docker: - - image: fbopensource/zstd-circleci-primary:0.0.1 - steps: - - checkout - - run: - name: Test - command: | - make gnu90build; make clean - make gnu99build; make clean - make ppc64build V=1; make clean - make ppcbuild V=1; make clean - make armbuild V=1; make clean - make -C tests test-legacy test-longmatch; make clean - make -C lib libzstd-nomt; make clean - # This step should only be run in a cron job - regression-test: - docker: - - image: fbopensource/zstd-circleci-primary:0.0.1 - environment: - CIRCLE_ARTIFACTS: /tmp/circleci-artifacts - steps: - - checkout - # Restore the cached resources. - - restore_cache: - # We try our best to bust the cache when the data changes by hashing - # data.c. If that doesn't work, simply update the version number here - # and below. If we fail to bust the cache, the regression testing will - # still work, since it has its own stamp, but will need to redownload - # everything. - keys: - - regression-cache-{{ checksum "tests/regression/data.c" }}-v0 - - run: - name: Regression Test - command: | - make -C programs zstd - make -C tests/regression test - mkdir -p $CIRCLE_ARTIFACTS - ./tests/regression/test \ - --cache tests/regression/cache \ - --output $CIRCLE_ARTIFACTS/results.csv \ - --zstd programs/zstd - echo "NOTE: The new results.csv is uploaded as an artifact to this job" - echo " If this fails, go to the Artifacts pane in CircleCI, " - echo " download /tmp/circleci-artifacts/results.csv, and if they " - echo " are still good, copy it into the repo and commit it." - echo "> diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv" - diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv - # Only save the cache on success (default), since if the failure happened - # before we stamp the data cache, we will have a bad cache for this key. 
- - save_cache: - key: regression-cache-{{ checksum "tests/regression/data.c" }}-v0 - paths: - - tests/regression/cache - - store_artifacts: - path: /tmp/circleci-artifacts - - -workflows: - version: 2 - commit: - jobs: - # Run the tests in parallel - - short-tests-0 - - short-tests-1 - - regression-test - - nightly: - triggers: - - schedule: - cron: "0 0 * * *" - filters: - branches: - only: - - release - - dev - - master - jobs: - # Run daily regression tests - - regression-test - - - - # Longer tests - #- make -C tests test-zstd-nolegacy && make clean - #- pyenv global 3.4.4; make -C tests versionsTest && make clean - #- make zlibwrapper && make clean - #- gcc -v; make -C tests test32 MOREFLAGS="-I/usr/include/x86_64-linux-gnu" && make clean - #- make uasan && make clean - #- make asan32 && make clean - #- make -C tests test32 CC=clang MOREFLAGS="-g -fsanitize=address -I/usr/include/x86_64-linux-gnu" - # Valgrind tests - #- CFLAGS="-O1 -g" make -C zlibWrapper valgrindTest && make clean - #- make -C tests valgrindTest && make clean - # ARM, AArch64, PowerPC, PowerPC64 tests - #- make ppctest && make clean - #- make ppc64test && make clean - #- make armtest && make clean - #- make aarch64test && make clean diff --git a/.circleci/images/primary/Dockerfile b/.circleci/images/primary/Dockerfile deleted file mode 100644 index dd800415252..00000000000 --- a/.circleci/images/primary/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM circleci/buildpack-deps:bionic - -RUN sudo dpkg --add-architecture i386 -RUN sudo apt-get -y -qq update -RUN sudo apt-get -y install \ - gcc-multilib-powerpc-linux-gnu gcc-arm-linux-gnueabi \ - libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross \ - libc6-dev-ppc64-powerpc-cross zstd gzip coreutils \ - libcurl4-openssl-dev diff --git a/.cirrus.yml b/.cirrus.yml index 27ca65e8d29..b8782ca48a6 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,10 +1,9 @@ task: - name: FreeBSD (shortest) + name: FreeBSD (make check) freebsd_instance: matrix: - image_family: freebsd-13-0 - image_family: freebsd-12-2 + image_family: freebsd-15-0-amd64-zfs install_script: pkg install -y gmake coreutils script: | MOREFLAGS="-Werror" gmake -j all - gmake shortest + gmake check diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000000..8ac6b8c4984 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" diff --git a/.github/workflows/android-ndk-build.yml b/.github/workflows/android-ndk-build.yml new file mode 100644 index 00000000000..bd4aa6fde7f --- /dev/null +++ b/.github/workflows/android-ndk-build.yml @@ -0,0 +1,50 @@ +name: Android NDK Build + +on: + pull_request: + branches: [ dev, release, actionsTest ] + push: + branches: [ actionsTest, '*ndk*' ] + +permissions: read-all + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + + - name: Set up JDK 17 + uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0 + with: + java-version: '17' + distribution: 'temurin' + + - name: Setup Android SDK + uses: android-actions/setup-android@651bceb6f9ca583f16b8d75b62c36ded2ae6fc9c # v4.0.0 + + - name: Install Android NDK + run: | + sdkmanager --install "ndk;27.0.12077973" + echo "ANDROID_NDK_HOME=$ANDROID_SDK_ROOT/ndk/27.0.12077973" >> $GITHUB_ENV + + - name: Build with NDK + run: | + export 
PATH=$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH + make CC=aarch64-linux-android21-clang \ + AR=llvm-ar \ + RANLIB=llvm-ranlib \ + STRIP=llvm-strip + + - name: Build with CMake and NDK + run: | + mkdir -p build-android + cd build-android + cmake --version + cmake ../build/cmake \ + -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK_HOME/build/cmake/android.toolchain.cmake \ + -DANDROID_ABI=arm64-v8a \ + -DANDROID_PLATFORM=android-21 \ + -DCMAKE_BUILD_TYPE=Release + cmake --build . --parallel diff --git a/.github/workflows/cmake-tests.yml b/.github/workflows/cmake-tests.yml new file mode 100644 index 00000000000..47817a65911 --- /dev/null +++ b/.github/workflows/cmake-tests.yml @@ -0,0 +1,168 @@ +name: cmake-tests +# CMake-specific build and test workflows +# This workflow validates zstd builds across different CMake configurations, +# platforms, and edge cases to ensure broad compatibility. + +concurrency: + group: cmake-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + branches: [ dev, release, actionsTest ] + +permissions: read-all + +env: + # Centralized test timeouts for consistency + QUICK_TEST_TIME: "30s" + STANDARD_TEST_TIME: "1mn" + # Common CMake flags + COMMON_CMAKE_FLAGS: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + +jobs: + # Basic cmake build using the root CMakeLists.txt + # Provides a lightweight sanity check that the top-level project config builds + # with the default Unix Makefiles generator driven purely through cmake commands + cmake-root-basic: + name: "CMake Root Build" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: Configure (Root) + run: | + cmake -S . -B cmake-build -DCMAKE_BUILD_TYPE=Release ${{ env.COMMON_CMAKE_FLAGS }} + - name: Build (Root) + run: | + cmake --build cmake-build --config Release + + # Ubuntu-based cmake build using make wrapper + # This test uses the make-driven cmake build to ensure compatibility + # with the existing build system integration + cmake-ubuntu-basic: + name: "CMake build using make wrapper" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: Install dependencies + run: | + sudo apt install liblzma-dev # Required for xz/lzma format support + - name: CMake build and test via make + run: | + # Use make wrapper for cmake build with the standard test timeouts + FUZZERTEST=-T${{ env.STANDARD_TEST_TIME }} ZSTREAM_TESTTIME=-T${{ env.STANDARD_TEST_TIME }} make cmakebuild V=1 + + # Cross-platform cmake build with edge case: source paths containing spaces + # This test ensures cmake handles filesystem paths with spaces correctly + # across different operating systems and build generators + cmake-cross-platform-spaces: + name: "CMake Cross-Platform (Spaces in Path)" + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: ubuntu-latest + generator: "Unix Makefiles" + name: "Linux" + - os: windows-latest + generator: "NMake Makefiles" + name: "Windows NMake" + - os: macos-latest + generator: "Unix Makefiles" + name: "macOS" + env: + SRC_DIR: "source directory with spaces" + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + with: + path: "${{ env.SRC_DIR }}" + - uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 # v1.13.0 + if: ${{ matrix.generator == 'NMake Makefiles' }} + - name: "CMake build and install (${{ matrix.name }})" + run: | + # Test Release build with installation to verify
packaging + cmake -S "${{ env.SRC_DIR }}/build/cmake" -B build -DBUILD_TESTING=ON -G "${{ matrix.generator }}" -DCMAKE_BUILD_TYPE=Release --install-prefix "${{ runner.temp }}/install" + cmake --build build --config Release + cmake --install build --config Release + + # Windows-specific cmake testing with Visual Studio 2022 + # Tests multiple generators and toolchains to ensure broad Windows compatibility + # including MSVC (x64, Win32, ARM64), MinGW, and Clang-CL with various architectures and optimizations + cmake-windows-comprehensive: + name: "CMake Windows VS2022 (${{ matrix.name }})" + runs-on: ${{ matrix.runner }} + strategy: + fail-fast: false + matrix: + include: + - generator: "Visual Studio 17 2022" + flags: "-A x64" + name: "MSVC x64" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + - generator: "Visual Studio 17 2022" + flags: "-A Win32" + name: "MSVC Win32" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + - generator: "Visual Studio 17 2022" + flags: "-A x64" + name: "MSVC x64 (No ZSTD_BUILD_TESTS)" + runner: "windows-2022" + # Intentionally omit ZSTD_BUILD_TESTS to reproduce the CXX language configuration bug + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON" + - generator: "Visual Studio 17 2022" + flags: "-A ARM64" + name: "MSVC ARM64" + runner: "windows-11-arm" # GitHub runner for Windows-on-ARM instance + - generator: "MinGW Makefiles" + flags: "" + name: "MinGW" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + - generator: "Visual Studio 17 2022" + flags: "-T ClangCL" + name: "Clang-CL" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + - generator: "Visual Studio 17 2022" + flags: "-T ClangCL -A x64 -DCMAKE_C_FLAGS=/arch:AVX2" + name: "Clang-CL AVX2" + runner: "windows-2022" + cmake_extra_flags: "-DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DZSTD_BUILD_TESTS=ON" + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: Add MSBuild to PATH + uses: microsoft/setup-msbuild@30375c66a4eea26614e0d39710365f22f8b0af57 # tag=v3.0.0 + - name: "Configure CMake (${{ matrix.name }})" + run: | + cd build\cmake + mkdir build + cd build + cmake.exe -G "${{matrix.generator}}" ${{matrix.flags}} -DCMAKE_BUILD_TYPE=Debug ${{ matrix.cmake_extra_flags }} -DZSTD_ZSTREAM_FLAGS=-T${{ env.QUICK_TEST_TIME }} -DZSTD_FUZZER_FLAGS=-T${{ env.QUICK_TEST_TIME }} -DZSTD_FULLBENCH_FLAGS=-i0 .. + - name: "Build (${{ matrix.name }})" + run: | + cd build\cmake\build + cmake.exe --build . + - name: "Test (${{ matrix.name }})" + run: | + cd build\cmake\build + ctest.exe -V -C Debug + + # macOS ARM64 (Apple Silicon) specific cmake testing + # Validates zstd builds and runs correctly on Apple Silicon architecture + # Uses native ARM64 hardware for optimal performance and compatibility testing + cmake-macos-arm64: + name: "CMake macOS ARM64 (Apple Silicon)" + runs-on: macos-14 # ARM64 runner + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: "CMake build and test (ARM64)" + run: | + # Configure and build with ARM64-specific optimizations + cd build/cmake + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release ${{ env.COMMON_CMAKE_FLAGS }} -DZSTD_ZSTREAM_FLAGS=-T${{ env.QUICK_TEST_TIME }} -DZSTD_FUZZER_FLAGS=-T${{ env.QUICK_TEST_TIME }} -DZSTD_FULLBENCH_FLAGS=-i1 ..
+ make -j$(sysctl -n hw.ncpu) + ctest -V diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml new file mode 100644 index 00000000000..af3028e3cd1 --- /dev/null +++ b/.github/workflows/commit.yml @@ -0,0 +1,104 @@ +name: facebook/zstd/commit +on: + push: + branches: + - dev + pull_request: + branches: + - dev + +permissions: read-all + +jobs: + short-tests-0: + runs-on: ubuntu-latest + services: + docker: + image: fbopensource/zstd-circleci-primary:0.0.1 + options: --entrypoint /bin/bash + steps: + - uses: actions/checkout@v6.0.2 + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install libcurl4-gnutls-dev + - name: Test + run: | + ./tests/test-license.py + cc -v + CFLAGS="-O0 -Werror -pedantic" make allmost; make clean + make c99build; make clean + make c11build; make clean + make -j regressiontest; make clean + make check; make clean + make cxxtest; make clean + + short-tests-1: + runs-on: ubuntu-latest + services: + docker: + image: fbopensource/zstd-circleci-primary:0.0.1 + options: --entrypoint /bin/bash + steps: + - uses: actions/checkout@v6.0.2 + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi gcc-aarch64-linux-gnu libc6-dev-ppc64-powerpc-cross libcurl4-gnutls-dev lib64gcc-13-dev-powerpc-cross + - name: gnu90 build + run: make gnu90build && make clean + - name: gnu99 build + run: make gnu99build && make clean + - name: ppc64 build + run: make ppc64build V=1 && make clean + - name: ppc build + run: make ppcbuild V=1 && make clean + - name: arm build + run: make armbuild V=1 && make clean + - name: aarch64 build + run: make aarch64build V=1 && make clean + - name: test-legacy + run: make -C tests test-legacy V=1 && make clean + - name: test-longmatch + run: make -C tests test-longmatch V=1 && make clean + - name: libzstd-nomt build + run: make -C lib libzstd-nomt V=1 && make clean + + regression-test: + runs-on: ubuntu-latest + services: + docker: + image: fbopensource/zstd-circleci-primary:0.0.1 + options: --entrypoint /bin/bash + env: + CIRCLE_ARTIFACTS: "/tmp/circleci-artifacts" + steps: + - uses: actions/checkout@v6.0.2 + - name: restore_cache + uses: actions/cache@v5 + with: + key: regression-cache-${{ hashFiles('tests/regression/data.c') }}-v0 + path: tests/regression/cache + restore-keys: regression-cache-${{ hashFiles('tests/regression/data.c') }}-v0 + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install libcurl4-gnutls-dev + - name: Regression Test + run: | + make -C programs zstd + make -C tests/regression test + mkdir -p $CIRCLE_ARTIFACTS + ./tests/regression/test \ + --cache tests/regression/cache \ + --output $CIRCLE_ARTIFACTS/results.csv \ + --zstd programs/zstd + echo "NOTE: The new results.csv is uploaded as an artifact to this job" + echo " If this fails, go to the Artifacts pane of this job, " + echo " download /tmp/circleci-artifacts/results.csv, and if they " + echo " are still good, copy it into the repo and commit it."
+ echo "> diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv" + diff tests/regression/results.csv $CIRCLE_ARTIFACTS/results.csv + - uses: actions/upload-artifact@v7.0.0 + with: + path: "/tmp/circleci-artifacts" diff --git a/.github/workflows/dev-long-tests.yml b/.github/workflows/dev-long-tests.yml index aadc0ab5499..447a865c573 100644 --- a/.github/workflows/dev-long-tests.yml +++ b/.github/workflows/dev-long-tests.yml @@ -1,5 +1,5 @@ name: dev-long-tests -# Tests longer than 10mn +# Tests generally longer than 10mn concurrency: group: long-${{ github.ref }} @@ -9,69 +9,113 @@ on: pull_request: branches: [ dev, release, actionsTest ] +permissions: read-all + jobs: + # lasts ~7mn make-all: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: make all run: make all - # lasts ~24mn + # lasts ~19mn make-test: runs-on: ubuntu-latest env: DEVNULLRIGHTS: 1 READFROMBLOCKDEVICE: 1 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: make test - run: make test + run: | + make test + make -j zstd + ./tests/test_process_substitution.bash ./zstd - # lasts ~26mn - make-test-osx: + # lasts ~16mn + make-test-macos: runs-on: macos-latest steps: - - uses: actions/checkout@v3 - - name: OS-X test - run: make test # make -c lib all doesn't work because of the fact that it's not a tty + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: make test on macos + run: make test + + # lasts ~10mn + make-test-32bit: + runs-on: ubuntu-latest + env: + DEVNULLRIGHTS: 1 + READFROMBLOCKDEVICE: 1 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: make test # note: `make -j test success` seems to require a clean state + run: | + sudo apt-get -qqq update + make libc6install + make clean + CFLAGS="-m32 -O2" make -j test V=1 + + # lasts ~7mn + test-largeDictionary: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: largeDictionary + run: | + CFLAGS="-Werror -O3" make -j -C tests test-largeDictionary + # lasts ~9mn no-intrinsics-fuzztest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: no intrinsics fuzztest run: MOREFLAGS="-DZSTD_NO_INTRINSICS" make -C tests fuzztest + # lasts ~8mn tsan-zstreamtest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: thread sanitizer zstreamtest run: CC=clang ZSTREAM_TESTTIME=-T3mn make tsan-test-zstream - ubsan-zstreamtest: + uasan-zstreamtest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - name: undefined behavior sanitizer zstreamtest + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: ub + address sanitizer on zstreamtest run: CC=clang make uasan-test-zstream - # lasts ~15mn + # lasts ~11mn tsan-fuzztest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: thread sanitizer fuzztest run: CC=clang make tsan-fuzztest - # lasts ~23mn + big-tests-zstreamtest32: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: zstream tests in 32bit mode, 
with big tests + run: | + sudo apt-get -qqq update + make libc6install + CC=clang make -C tests test-zstream32 FUZZER_FLAGS="--big-tests" + + # lasts ~13mn gcc-8-asan-ubsan-testzstd: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: gcc-8 + ASan + UBSan + Test Zstd + # See https://askubuntu.com/a/1428822 run: | + echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu focal main universe" | sudo tee -a /etc/apt/sources.list sudo apt-get -qqq update make gcc8install CC=gcc-8 make -j uasan-test-zstd cross.ini < + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=${{matrix.toolset}} + /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} /warnaserror + - name: Build ${{matrix.name}} working-directory: ${{env.GITHUB_WORKSPACE}} # See https://docs.microsoft.com/visualstudio/msbuild/msbuild-command-line-reference + if: ${{ matrix.arch != '' }} run: > - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v142 - /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} - -# TODO: fix as part of https://github.com/facebook/zstd/issues/3064 -# visual-2015: -# # only GH actions windows-2016 contains VS 2015 -# runs-on: windows-2016 -# strategy: -# matrix: -# platform: [x64, Win32] -# configuration: [Debug, Release] -# steps: -# - uses: actions/checkout@v3 -# - name: Add MSBuild to PATH -# uses: microsoft/setup-msbuild@v1.0.2 -# - name: Build -# working-directory: ${{env.GITHUB_WORKSPACE}} -# run: > -# msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 -# /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=${{matrix.toolset}} + /t:Clean,Build /p:Platform=${{matrix.platform}} /p:Configuration=${{matrix.configuration}} /warnaserror + /p:InstructionSet=${{matrix.arch}} + + # This tests that we don't accidentally grow the size too much. + # If the size grows intentionally, you can raise these numbers. + # But we do need to think about binary size, since it is a concern. 
+ libzstd-size: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: libzstd size test + run: | + make clean && make -j -C lib libzstd && ./tests/check_size.py lib/libzstd.so 1100000 + make clean && make -j -C lib libzstd ZSTD_LIB_COMPRESSION=0 ZSTD_LIB_DICTBUILDER=0 && ./tests/check_size.py lib/libzstd.so 400000 + make clean && make -j -C lib libzstd ZSTD_LIB_MINIFY=1 && ./tests/check_size.py lib/libzstd.so 300000 + make clean && make -j -C lib libzstd ZSTD_LIB_MINIFY=1 ZSTD_LIB_COMPRESSION=0 ZSTD_LIB_DICTBUILDER=0 && ./tests/check_size.py lib/libzstd.so 80000 minimal-decompressor-macros: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: minimal decompressor macros run: | make clean && make -j all ZSTD_LIB_MINIFY=1 MOREFLAGS="-Werror" @@ -246,11 +366,13 @@ jobs: make clean && make check MOREFLAGS="-Werror -DHUF_FORCE_DECOMPRESS_X2 -DZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG" make clean && make -j all MOREFLAGS="-Werror -DZSTD_NO_INLINE -DZSTD_STRIP_ERROR_STRINGS" make clean && make check MOREFLAGS="-Werror -DZSTD_NO_INLINE -DZSTD_STRIP_ERROR_STRINGS" + make clean && make check ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP=1 MOREFLAGS="-Werror" + make clean && make check ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP=1 MOREFLAGS="-Werror" dynamic-bmi2: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: dynamic bmi2 tests run: | make clean && make -j check MOREFLAGS="-O0 -Werror -mbmi2" @@ -262,33 +384,34 @@ jobs: test-variants: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: make all variants & validate run: | make -j -C programs allVariants MOREFLAGS=-O0 ./tests/test-variants.sh - qemu-consistency: name: QEMU ${{ matrix.name }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 strategy: fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix failed. 
matrix: include: [ { name: ARM, xcc_pkg: gcc-arm-linux-gnueabi, xcc: arm-linux-gnueabi-gcc, xemu_pkg: qemu-system-arm, xemu: qemu-arm-static }, - { name: ARM64, xcc_pkg: gcc-aarch64-linux-gnu, xcc: aarch64-linux-gnu-gcc, xemu_pkg: qemu-system-arm, xemu: qemu-aarch64-static }, + { name: ARM64, xcc_pkg: gcc-aarch64-linux-gnu, xcc: aarch64-linux-gnu-gcc, xemu_pkg: qemu-system-aarch64,xemu: qemu-aarch64-static }, { name: PPC, xcc_pkg: gcc-powerpc-linux-gnu, xcc: powerpc-linux-gnu-gcc, xemu_pkg: qemu-system-ppc, xemu: qemu-ppc-static }, { name: PPC64LE, xcc_pkg: gcc-powerpc64le-linux-gnu, xcc: powerpc64le-linux-gnu-gcc, xemu_pkg: qemu-system-ppc, xemu: qemu-ppc64le-static }, { name: S390X, xcc_pkg: gcc-s390x-linux-gnu, xcc: s390x-linux-gnu-gcc, xemu_pkg: qemu-system-s390x, xemu: qemu-s390x-static }, { name: MIPS, xcc_pkg: gcc-mips-linux-gnu, xcc: mips-linux-gnu-gcc, xemu_pkg: qemu-system-mips, xemu: qemu-mips-static }, + { name: RISC-V, xcc_pkg: gcc-14-riscv64-linux-gnu, xcc: riscv64-linux-gnu-gcc-14, xemu_pkg: qemu-system-riscv64,xemu: qemu-riscv64-static }, { name: M68K, xcc_pkg: gcc-m68k-linux-gnu, xcc: m68k-linux-gnu-gcc, xemu_pkg: qemu-system-m68k, xemu: qemu-m68k-static }, + { name: SPARC, xcc_pkg: gcc-sparc64-linux-gnu, xcc: sparc64-linux-gnu-gcc, xemu_pkg: qemu-system-sparc, xemu: qemu-sparc64-static }, ] env: # Set environment variables XCC: ${{ matrix.xcc }} XEMU: ${{ matrix.xemu }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: apt update & install run: | sudo apt-get update @@ -308,7 +431,15 @@ jobs: - name: ARM64 if: ${{ matrix.name == 'ARM64' }} run: | - LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + make clean + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make -j check + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make -j -C tests test-cli-tests + CFLAGS="-O3 -march=armv8.2-a+sve2" LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make -j check + CFLAGS="-O3 -march=armv8.2-a+sve2" LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make -j -C tests test-cli-tests +# This test is only compatible with standard libraries that support BTI (Branch Target Identification). +# Unfortunately, the standard library provided on Ubuntu 24.04 does not have this feature enabled. 
+# make clean +# LDFLAGS="-static -z force-bti" MOREFLAGS="-mbranch-protection=standard" CC=$XCC QEMU_SYS=$XEMU make check V=1 - name: PPC if: ${{ matrix.name == 'PPC' }} run: | @@ -325,58 +456,75 @@ jobs: if: ${{ matrix.name == 'MIPS' }} run: | LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: RISC-V + if: ${{ matrix.name == 'RISC-V' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + CFLAGS="-march=rv64gcv -O3" LDFLAGS="-static -DMEM_FORCE_MEMORY_ACCESS=0" CC=$XCC QEMU_SYS="$XEMU -cpu rv64,v=true,vlen=128" make clean check + CFLAGS="-march=rv64gcv -O3" LDFLAGS="-static -DMEM_FORCE_MEMORY_ACCESS=0" CC=$XCC QEMU_SYS="$XEMU -cpu rv64,v=true,vlen=256" make clean check + CFLAGS="-march=rv64gcv -O3" LDFLAGS="-static -DMEM_FORCE_MEMORY_ACCESS=0" CC=$XCC QEMU_SYS="$XEMU -cpu rv64,v=true,vlen=512" make clean check - name: M68K if: ${{ matrix.name == 'M68K' }} run: | LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check + - name: SPARC + if: ${{ matrix.name == 'SPARC' }} + run: | + LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check mingw-short-test: - runs-on: windows-2019 + runs-on: windows-latest strategy: - fail-fast: false + fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix failed. matrix: include: [ - { compiler: gcc, platform: x64, script: "CFLAGS=-Werror make -j allzstd DEBUGLEVEL=2"}, - { compiler: gcc, platform: x86, script: "CFLAGS=-Werror make -j allzstd"}, - { compiler: clang, platform: x64, script: "CFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make -j allzstd V=1"}, + { compiler: gcc, msystem: MINGW32, cflags: "-Werror"}, + { compiler: gcc, msystem: MINGW64, cflags: "-Werror"}, + { compiler: clang, msystem: MINGW64, cflags: "--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion -Wno-unused-command-line-argument"}, ] + defaults: + run: + shell: msys2 {0} steps: - - uses: actions/checkout@v3 - - name: Mingw short test - run: | - ECHO "Building ${{matrix.compiler}} ${{matrix.platform}}" - $env:PATH_ORIGINAL = $env:PATH - $env:PATH_MINGW32 = "C:\msys64\mingw32\bin" - $env:PATH_MINGW64 = "C:\msys64\mingw64\bin" - COPY C:\msys64\usr\bin\make.exe C:\msys64\mingw32\bin\make.exe - COPY C:\msys64\usr\bin\make.exe C:\msys64\mingw64\bin\make.exe - IF ("${{matrix.platform}}" -eq "x64") - { - $env:PATH = $env:PATH_MINGW64 + ";" + $env:PATH_ORIGINAL - } - ELSEIF ("${{matrix.platform}}" -eq "x86") - { - $env:PATH = $env:PATH_MINGW32 + ";" + $env:PATH_ORIGINAL - } + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - uses: msys2/setup-msys2@cafece8e6baf9247cf9b1bf95097b0b983cc558d # tag=v2.31.0 + with: + msystem: ${{ matrix.msystem }} + install: make diffutils + update: true + # Based on https://ariya.io/2020/07/on-github-actions-with-msys2 + - name: install mingw gcc i686 + if: ${{ (matrix.msystem == 'MINGW32') && (matrix.compiler == 'gcc') }} + run: pacman --noconfirm -S mingw-w64-i686-gcc + - name: install mingw gcc x86_64 + if: ${{ (matrix.msystem == 'MINGW64') && (matrix.compiler == 'gcc') }} + run: pacman --noconfirm -S mingw-w64-x86_64-gcc + - name: install mingw clang i686 + if: ${{ (matrix.msystem == 'MINGW32') && (matrix.compiler == 'clang') }} + run: pacman --noconfirm -S mingw-w64-i686-clang + - name: install mingw clang x86_64 + if: ${{ (matrix.msystem == 'MINGW64') && (matrix.compiler == 'clang') }} + run: pacman --noconfirm -S mingw-w64-x86_64-clang + - name: run mingw tests + run: | make -v - sh -c "${{matrix.compiler}} -v" - 
$env:CC = "${{matrix.compiler}}" - sh -c "${{matrix.script}}" - ECHO "Testing ${{matrix.compiler}} ${{matrix.platform}}" + export CC=${{ matrix.compiler }} + $CC --version + CFLAGS="${{ matrix.cflags }}" make -j allzstd + echo "Testing $CC ${{ matrix.msystem }}" make clean - make check - + MSYS="" make check visual-runtime-tests: - runs-on: windows-2019 + runs-on: windows-latest strategy: matrix: platform: [x64, Win32] configuration: [Release] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: Add MSBuild to PATH - uses: microsoft/setup-msbuild@v1.0.2 + uses: microsoft/setup-msbuild@30375c66a4eea26614e0d39710365f22f8b0af57 # tag=v3.0.0 - name: Build and run tests working-directory: ${{env.GITHUB_WORKSPACE}} env: @@ -388,34 +536,46 @@ jobs: COPY build\VS2010\bin\${{matrix.platform}}_${{matrix.configuration}}\*.exe tests\ CD tests sh -e playTests.sh - DIR .\fuzzer.exe -T2m - intel-cet-compatibility: - runs-on: ubuntu-latest + # Following instructions at: https://github.com/marketplace/actions/install-cygwin-action + cygwin-tests: + runs-on: windows-latest steps: - - uses: actions/checkout@v3 - - name: Build Zstd - run: | - make -j zstd V=1 - readelf -n zstd - - name: Get Intel SDE - run: | - curl -LO https://downloadmirror.intel.com/684899/sde-external-9.0.0-2021-11-07-lin.tar.xz - tar xJvf sde-external-9.0.0-2021-11-07-lin.tar.xz - - name: Configure Permissions - run: | - echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope - - name: Run Under SDE - run: | - sde-external-9.0.0-2021-11-07-lin/sde -cet -cet-raise 0 -cet-endbr-exe -cet-stderr -cet-abort -- ./zstd -b3 + - run: git config --global core.autocrlf input + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - uses: cygwin/cygwin-install-action@711d29f3da23c9f4a1798e369a6f01198c13b11a # tag=v6 + with: + platform: x86_64 + packages: >- + autoconf, + automake, + gcc-g++, + make, + mingw64-x86_64-gcc-g++, + patch, + perl + - name: cygwin tests + shell: C:\cygwin\bin\bash.exe --noprofile --norc -eo pipefail '{0}' + run: >- + export PATH=/usr/bin:$(cygpath ${SYSTEMROOT})/system32 && + export CFLAGS="-Werror -O1" && + ls && + make -j allzstd && + make -C tests fuzzer && + ./tests/fuzzer.exe -v -T1m + - name: cygwin install test + shell: C:\cygwin\bin\bash.exe --noprofile --norc -eo pipefail '{0}' + run: >- + make -j && + make install pkg-config: runs-on: ubuntu-latest container: image: debian:testing steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: Install dependencies run: | apt -y update @@ -427,39 +587,94 @@ jobs: cc -Wall -Wextra -Wpedantic -Werror -o simple examples/simple_compression.c $(pkg-config --cflags --libs libzstd) ./simple LICENSE + versions-compatibility: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: Versions Compatibility Test + run: | + make -C tests versionsTest + + clangbuild: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: make clangbuild + run: | + make clangbuild -# This test currently fails on Github Actions specifically. -# Possible reason : TTY emulation. -# Note that the same test works fine locally and on travisCI. -# This will have to be fixed before transferring the test to GA. 
-# versions-compatibility: -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@v3 -# - name: Versions Compatibility Test -# run: | -# make -C tests versionsTest - - -# For reference : icc tests -# icc tests are currently failing on Github Actions, likely to issues during installation stage -# To be fixed later -# -# icc: -# name: icc-check -# runs-on: ubuntu-latest -# steps: -# - name: install icc -# run: | -# export DEBIAN_FRONTEND=noninteractive -# sudo apt-get -qqq update -# sudo apt-get install -y wget build-essential pkg-config cmake ca-certificates gnupg -# sudo wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB -# sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB -# sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main" -# sudo apt-get update -# sudo apt-get install -y intel-basekit intel-hpckit -# - uses: actions/checkout@v3 -# - name: make check -# run: | -# make CC=/opt/intel/oneapi/compiler/latest/linux/bin/intel64/icc check + gcc-pgo: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: Build PGO Zstd with GCC + env: + CC: gcc + run: | + make -C programs zstd-pgo + ./programs/zstd -b + + clang-pgo: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: Build PGO Zstd with Clang + env: + CC: clang + run: | + sudo apt install -y llvm + llvm-profdata --version + make -C programs zstd-pgo + ./programs/zstd -b + + musl-build: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: Install musl-tools + run: | + sudo apt install -y musl-tools + - name: Compile with musl-gcc and test-zstd + run: | + CC=musl-gcc CFLAGS="-Werror -O3" CPPFLAGS=-DZDICT_QSORT=ZDICT_QSORT_C90 make -j -C tests test-zstd V=1 + + intel-cet-compatibility: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: Build Zstd + run: | + make -j zstd V=1 + readelf -n zstd + - name: Get Intel SDE + run: | + curl -LO https://downloadmirror.intel.com/813591/sde-external-9.33.0-2024-01-07-lin.tar.xz + tar xJvf sde-external-9.33.0-2024-01-07-lin.tar.xz + - name: Configure Permissions + run: | + echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope + - name: Run Under SDE + run: | + sde-external-9.33.0-2024-01-07-lin/sde -cet -cet-raise 0 -cet-endbr-exe -cet-stderr -cet-abort -- ./zstd -b3 + + icx: + # install instructions: https://www.intel.com/content/www/us/en/docs/oneapi/installation-guide-linux/2025-0/apt-005.html + name: icx-check + runs-on: ubuntu-latest + steps: + - name: install icx + run: | + # download the key to system keyring + wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ + | gpg --dearmor | sudo tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null + + # add signed entry to apt sources and configure the APT client to use Intel repository: + echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list + sudo apt-get update + sudo apt-get install -y intel-basekit intel-hpckit + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + - name: make check + run: | + source /opt/intel/oneapi/setvars.sh + make CC=icx check + make CC=icx -C tests test-cli-tests diff --git 
a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml new file mode 100644 index 00000000000..fe296208c3b --- /dev/null +++ b/.github/workflows/nightly.yml @@ -0,0 +1,38 @@ +name: facebook/zstd/nightly +on: + schedule: + - cron: '0 0 * * *' + push: + branches: + - release + - dev + - '*nightly*' +permissions: read-all +jobs: + regression-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6.0.2 + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y libcurl4-openssl-dev + - name: Regression Test + run: | + make -C programs zstd + make -C tests/regression test + +# Longer tests + #- make -C tests test-zstd-nolegacy && make clean + #- pyenv global 3.4.4; make -C tests versionsTest && make clean + #- make zlibwrapper && make clean + #- gcc -v; make -C tests test32 MOREFLAGS="-I/usr/include/x86_64-linux-gnu" && make clean + #- make uasan && make clean + #- make asan32 && make clean + #- make -C tests test32 CC=clang MOREFLAGS="-g -fsanitize=address -I/usr/include/x86_64-linux-gnu" +# Valgrind tests + #- CFLAGS="-O1 -g" make -C zlibWrapper valgrindTest && make clean + #- make -C tests valgrindTest && make clean +# ARM, AArch64, PowerPC, PowerPC64 tests + #- make ppctest && make clean + #- make ppc64test && make clean + #- make armtest && make clean + #- make aarch64test && make clean diff --git a/.github/workflows/publish-release-artifacts.yml b/.github/workflows/publish-release-artifacts.yml index b5a3ac689bf..3ce3c0e841e 100644 --- a/.github/workflows/publish-release-artifacts.yml +++ b/.github/workflows/publish-release-artifacts.yml @@ -5,21 +5,19 @@ on: types: - published -permissions: - contents: read +permissions: read-all jobs: publish-release-artifacts: permissions: - contents: read # to fetch code (actions/checkout) - actions: write # to attach binaries to release artifacts (skx/github-action-publish-binaries) + contents: write # to fetch code and upload artifacts runs-on: ubuntu-latest if: startsWith(github.ref, 'refs/tags/') steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - name: Archive env: @@ -68,7 +66,7 @@ fi - name: Publish - uses: skx/github-action-publish-binaries@release-1.3 + uses: skx/github-action-publish-binaries@b9ca5643b2f1d7371a6cba7f35333f1461bbc703 # tag=release-2.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/release_check.yml b/.github/workflows/release_check.yml new file mode 100644 index 00000000000..674da6b9b8b --- /dev/null +++ b/.github/workflows/release_check.yml @@ -0,0 +1,64 @@ +name: release_checks + +on: + push: + branches: + - release + pull_request: + branches: + - release + +permissions: read-all + +jobs: + verify-manual: + runs-on: ubuntu-latest + steps: + - name: Check out repository + uses: actions/checkout@v6.0.2 + + - name: Save current manual + run: mv doc/zstd_manual.html doc/zstd_manual_saved.html + + - name: Generate new manual + run: make manual + + - name: Compare manuals + run: | + if ! cmp -s doc/zstd_manual.html doc/zstd_manual_saved.html; then + echo "The API manual was not updated before release!"
+ exit 1 + fi + + verify-man-pages: + runs-on: ubuntu-latest + steps: + - name: Check out repository + uses: actions/checkout@v6.0.2 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y ruby ruby-dev + sudo gem install ronn + + - name: Display ronn version + run: ronn --version + + - name: Save current man pages + run: | + mv programs/zstd.1 programs/zstd.1.saved + mv programs/zstdgrep.1 programs/zstdgrep.1.saved + mv programs/zstdless.1 programs/zstdless.1.saved + + - name: Generate new manual pages + run: make -C programs man + + - name: Compare man pages + run: | + for file in zstd.1 zstdgrep.1 zstdless.1; do + if ! cmp -s programs/$file programs/$file.saved; then + echo "The man page $file should have been updated." + exit 1 + fi + done diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml new file mode 100644 index 00000000000..1e372f63a95 --- /dev/null +++ b/.github/workflows/scorecards.yml @@ -0,0 +1,64 @@ +name: Scorecards supply-chain security +on: + # Only the default branch is supported. + branch_protection_rule: + schedule: + - cron: '22 21 * * 2' + push: + # TODO: Add release branch when supported? + branches: [ "dev" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecards analysis + if: github.repository == 'facebook/zstd' + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Used to receive a badge. + id-token: write + # Needed for private repositories. + contents: read + actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # tag=v2.4.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) Read-only PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecards on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_READ_TOKEN }} + + # Publish the results for public repositories to enable scorecard badges. For more details, see + # https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories, `publish_results` will automatically be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # tag=v7.0.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard.
+ - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # tag=v4.35.1 + with: + sarif_file: results.sarif diff --git a/.github/workflows/windows-artifacts.yml b/.github/workflows/windows-artifacts.yml new file mode 100644 index 00000000000..cb659e5fbe7 --- /dev/null +++ b/.github/workflows/windows-artifacts.yml @@ -0,0 +1,119 @@ +name: windows-artifacts + +on: + push: + branches: [ test_artifacts, win_artifacts, release ] + release: + types: + - published + +permissions: read-all + +jobs: + windows-artifacts: + permissions: + contents: write # to fetch code and upload artifacts + # For msys2, see https://ariya.io/2020/07/on-github-actions-with-msys2 + runs-on: ${{ matrix.shell == 'cmake' && 'windows-11-arm' || 'windows-latest' }} + strategy: + # For msys2, see https://github.com/msys2/setup-msys2 + matrix: + include: + - { msystem: mingw64, env: x86_64, ziparch: win64, shell: msys2 } + - { msystem: mingw32, env: i686, ziparch: win32, shell: msys2 } + - { msystem: null, env: arm64, ziparch: win-arm64, shell: cmake } + + defaults: + run: + shell: ${{ matrix.shell == 'cmake' && 'pwsh' || 'msys2 {0}' }} + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 + + # MSYS2 setup + - uses: msys2/setup-msys2@cafece8e6baf9247cf9b1bf95097b0b983cc558d # tag=v2.31.0 + if: matrix.shell == 'msys2' + with: + msystem: ${{ matrix.msystem }} + install: make p7zip git mingw-w64-${{matrix.env}}-gcc + update: true + + - name: display versions (MSYS2) + if: matrix.shell == 'msys2' + run: | + make -v + cc -v + + - name: display versions (CMake) + if: matrix.shell == 'cmake' + run: | + cmake --version + + # Build dependencies (MSYS2 only) + - name: Building zlib to static link + if: matrix.shell == 'msys2' + run: | + git clone --depth 1 --branch v1.3.1 https://github.com/madler/zlib + make -C zlib -f win32/Makefile.gcc libz.a + + - name: Building lz4 to static link + if: matrix.shell == 'msys2' + run: | + git clone --depth 1 --branch v1.10.0 https://github.com/lz4/lz4 + # ensure both libraries use the same version of libxxhash + cp lib/common/xxhash.* lz4/lib + CPPFLAGS=-DXXH_NAMESPACE=LZ4_ make -C lz4/lib liblz4.a V=1 + + # Build zstd + - name: Building zstd programs + if: matrix.shell == 'msys2' + run: | + CPPFLAGS="-I../zlib -I../lz4/lib" LDFLAGS=-static make -j allzstd V=1 HAVE_ZLIB=1 HAVE_LZ4=1 HAVE_LZMA=0 LDLIBS="../zlib/libz.a ../lz4/lib/liblz4.a" + + - name: Build zstd (CMake ARM64) + if: matrix.shell == 'cmake' + run: | + cd build\cmake + mkdir build + cd build + cmake.exe -G "Visual Studio 17 2022" -A ARM64 -DCMAKE_BUILD_TYPE=Release -DZSTD_BUILD_PROGRAMS=ON -DZSTD_BUILD_SHARED=ON -DZSTD_BUILD_STATIC=ON .. + cmake.exe --build . 
--config Release --parallel + + - name: Create artifacts (MSYS2) + if: matrix.shell == 'msys2' + run: | + ./lib/dll/example/build_package.bat || exit 1 + mv bin/ zstd-${{ github.ref_name }}-${{matrix.ziparch}}/ + + - name: Create artifacts (CMake) + if: matrix.shell == 'cmake' + run: | + .\lib\dll\example\build_package.bat + if ($LASTEXITCODE -ne 0) { exit 1 } + mv bin/ zstd-${{ github.ref_name }}-${{matrix.ziparch}}/ + + - name: Publish zstd-$VERSION-${{matrix.ziparch}}.zip for manual inspection + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # tag=v7.0.0 + with: + compression-level: 9 # maximum compression + if-no-files-found: error # defaults to `warn` + path: ${{ github.workspace }}/zstd-${{ github.ref_name }}-${{matrix.ziparch}}/ + name: zstd-${{ github.ref_name }}-${{matrix.ziparch}} + + - name: Package artifact for upload (MSYS2) + if: matrix.shell == 'msys2' + run: | + 7z a -tzip -mx9 "$(cygpath -u '${{ github.workspace }}/zstd-${{ github.ref_name }}-${{ matrix.ziparch }}.zip')" "$(cygpath -u '${{ github.workspace }}/zstd-${{ github.ref_name }}-${{ matrix.ziparch }}')" + + - name: Package artifact for upload (CMake) + if: matrix.shell == 'cmake' + run: | + Compress-Archive -Path "zstd-${{ github.ref_name }}-${{ matrix.ziparch }}" -DestinationPath "zstd-${{ github.ref_name }}-${{ matrix.ziparch }}.zip" -CompressionLevel Optimal + + - name: Upload release asset + if: github.event_name == 'release' + shell: pwsh + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh release upload "${{ github.ref_name }}" "$env:GITHUB_WORKSPACE/zstd-${{ github.ref_name }}-${{ matrix.ziparch }}.zip" --clobber \ No newline at end of file diff --git a/.gitignore b/.gitignore index a136ea39496..03ecbde6af8 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,8 @@ *.so *.so.* *.dylib +*.framework +*.xcframework # Executables /zstd @@ -20,14 +22,6 @@ zstdmt *.out *.app -# Test artefacts -tmp* -*.zst -*.zstd -dictionary. -dictionary -NUL - # Build artefacts contrib/linux-kernel/linux/ projects/ @@ -36,12 +30,27 @@ bin/ buck-out/ build-* *.gcda +cmakebuild/ +cmake-build/ + +# Test artefacts +tmp* +*.zst +*.zstd +dictionary. +dictionary +NUL +install/ + +# IDE +.clang_complete +compile_flags.txt +.clang-format # Other files .directory _codelite/ _zstdbench/ -.clang_complete *.idea *.swp .DS_Store diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b96bf8ba217..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,128 +0,0 @@ -# Travis CI is used to test platforms that github-actions currently doesn't support -# without either self-hosting or some finnicky work-around. Also, some tests -# are troublesome to migrate since GH Actions runs tests not in a tty. 
-language: c - -git: - depth: 1 - -branches: - only: - - dev - - release - - master - - travisTest - -addons: - apt: - update: true - -env: - global: - - FUZZERTEST=-T1mn - ZSTREAM_TESTTIME=-T1mn - DECODECORPUS_TESTTIME=-T1mn - -matrix: - fast_finish: true - include: - - name: S390X (big endian) + Fuzz test - dist: trusty - arch: s390x - script: - - FUZZER_FLAGS=--no-big-tests make -C tests fuzztest - - - name: S390X (big endian) + Fuzz test + no intrinsics - dist: trusty - arch: s390x - script: - - MOREFLAGS="-DZSTD_NO_INTRINSICS" FUZZER_FLAGS=--no-big-tests make -C tests fuzztest - - - name: arm64 # ~2.5 mn - os: linux - arch: arm64 - script: - - make check - - - name: arm64fuzz - os: linux - arch: arm64 - script: - - make -C tests fuzztest - - # TODO: migrate to GH Actions once newest clang staticanalyze warnings are fixed - - name: static analyzer scanbuild # ~8mn - dist: trusty # note : it's important to pin down a version of static analyzer, since different versions report different false positives - script: - - make staticAnalyze - - # GH actions can't run this command on OS-X, non-tty issues - - name: OS-X make all lib - os: osx - script: - - make -C lib all - - # Introduced to check compat with old toolchains, to prevent e.g. #1872 - - name: ARM Build Test (on Trusty) - dist: trusty - script: - - make arminstall - - make armbuild - - # check release number (release/new tag only) - - name: Tag-Specific Test - if: tag =~ ^v[0-9]\.[0-9] - script: - - make -C tests checkTag - - tests/checkTag "$TRAVIS_BRANCH" - - - name: PPC64LE + Fuzz test # ~13mn - arch: ppc64le - env: - - FUZZER_FLAGS=--no-big-tests - - MOREFLAGS="-static" - script: - - cat /proc/cpuinfo - - make -C tests fuzztest - - # This test currently fails on GA specifically, for no obvious reason - # (it works fine on travisCI, and on local test platforms). 
- - name: Versions Compatibility Test # ~6mn - script: - - make -C tests versionsTest - - # meson dedicated test - - name: Focal (Meson + clang) # ~15mn - dist: focal - language: cpp - compiler: clang - install: - - sudo apt-get install -qq liblz4-dev valgrind tree - - | - travis_retry curl -o ~/ninja.zip -L 'https://github.com/ninja-build/ninja/releases/download/v1.9.0/ninja-linux.zip' && - unzip ~/ninja.zip -d ~/.local/bin - - | - travis_retry curl -o ~/get-pip.py -L 'https://bootstrap.pypa.io/pip/3.6/get-pip.py' && - python3 ~/get-pip.py --user && - pip3 install --user meson - script: - - | - meson setup \ - --buildtype=debugoptimized \ - -Db_lundef=false \ - -Dauto_features=enabled \ - -Dbin_programs=true \ - -Dbin_tests=true \ - -Dbin_contrib=true \ - -Ddefault_library=both \ - build/meson builddir - - pushd builddir - - ninja - - meson test --verbose --no-rebuild - - DESTDIR=./staging ninja install - - tree ./staging - after_failure: - - cat "$TRAVIS_BUILD_DIR"/builddir/meson-logs/testlog.txt - - allow_failures: - - env: ALLOW_FAILURES=true diff --git a/CHANGELOG b/CHANGELOG index e8c40f302e3..eb46d711872 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,37 +1,148 @@ -v1.5.3 (June, 2022) -perf: 5-30% faster dictionary compression at levels 1-4 (#3086, #3114, #3152, @embg) -perf: 5-10% faster streaming compression at levels 1-2 (#3114, @embg) -perf: Remove branch in ZSTD_fast_noDict (#3129, @felixhandte) -perf: Add option to prefetch CDict tables (#3177, @embg) -perf: Minor compression ratio improvement (#2983, @Cyan4973) -perf: Minor speed improvement for Huffman decoding (#3013, @WojciechMula) -perf: Enable STATIC_BMI2 for gcc/clang (#3080, @TocarIP) -perf: Optimize ZSTD_row_getMatchMask for ARM levels 8-10 (#3139, #3160, @danlark1) -perf: aarch64 performance improvements (#3145, #3141, @JunHe77) -perf: Lazy parameters adaptation (#2974, @Cyan4973) -cli: Async write for decompression (#2975, @yoniko) -cli: Use buffered output (#2985, @yoniko) -cli: Change zstdless behavior to align with zless (#2909, @binhdvo) -cli: AsyncIO compression (#3021, #3022, @yoniko) -cli: Print zlib/lz4/lzma library versions in verbose version output (#3030, @terrelln) -cli: Fix for -r on empty directory (#3027, @brailovich) -cli: Fix required decompression memory usage reported by -vv + --long (#3042, @u1f35c) +v1.6.0 (Dec 2025) +api: legacy format support is now disabled by default +build: `ZSTD_LEGACY_SUPPORT` defaults to `0` in Makefile and CMake + +v1.5.7 (Feb 2025) +fix: compression bug in 32-bit mode associated with long-lasting sessions +api: new method `ZSTD_compressSequencesAndLiterals()` (#4217, #4232) +api: `ZSTD_getFrameHeader()` works on skippable frames (#4228) +perf: substantial compression speed improvements (up to +30%) on small data, by @TocarIP (#4144) and @cyan4973 (#4165) +perf: improved compression speed (~+5%) for dictionary compression at low levels (#4170) +perf: much faster speed for `--patch-from` at high compression levels (#4276) +perf: higher `--patch-from` compression ratios, notably at high levels (#4288) +perf: better speed for binaries on Windows (@pps83) and when compiled with Visual Studio (@MessyHack) +perf: slight compression ratio improvement thanks to better block boundaries (#4136, #4176, #4178) +perf: slight compression ratio improvement for `dfast`, aka levels 3 and 4 (#4171) +perf: runtime bmi2 detection enabled on x86 32-bit mode (#4251) +cli: multi-threading as default CLI setting, by @daniellerozenblit +cli: new `--max` command (#4290) +build: improve `msbuild`
version autodetection, support VS2022, by @ManuelBlanc +build: fix `meson` build by @artem and @Victor-C-Zhang, and on Windows by @bgilbert +build: compatibility with Apple Framework, by @Treata11 +build: improve icc/icx compatibility, by @josepho0918 and @luau-project +build: improve compatibility with Android NDK, by Adenilson Cavalcanti +portability: linux kernel branch, with improved support for Sequence producers (@embg, @gcabiddu, @cyan4973) +portability: improved qnx compatibility, suggested by @rainbowball +portability: improved install script for FreeBSD, by @sunpoet +portability: fixed test suite compatibility with gnu hurd, by @diegonc +doc: clarify specification, by @elasota +misc: improved tests/decodecorpus validation tool (#4102), by antmicro + +v1.5.6 (Mar 2024) +api: Promote `ZSTD_c_targetCBlockSize` to Stable API by @felixhandte +api: new `ZSTD_d_maxBlockSize` experimental parameter, to reduce streaming decompression memory, by @terrelln +perf: improve performance of param `ZSTD_c_targetCBlockSize`, by @Cyan4973 +perf: improved compression of arrays of integers at high compression, by @Cyan4973 +lib: reduce binary size with selective build-time exclusion, by @felixhandte +lib: improved huffman speed on small data and linux kernel, by @terrelln +lib: accept dictionaries with partial literal tables, by @terrelln +lib: fix CCtx size estimation with external sequence producer, by @embg +lib: fix corner case decoder behaviors, by @Cyan4973 and @aimuz +lib: fix zdict prototype mismatch in static_only mode, by @ldv-alt +lib: fix several bugs in magicless-format decoding, by @embg +cli: add common compressed file types to `--exclude-compressed` by @daniellerozenblit +cli: fix mixing `-c` and `-o` commands with `--rm`, by @Cyan4973 +cli: fix erroneous exclusion of hidden files with `--output-dir-mirror` by @felixhandte +cli: improved time accuracy on BSD, by @felixhandte +cli: better errors on argument parsing, by @KapJI +tests: better compatibility with older versions of `grep`, by @Cyan4973 +tests: lorem ipsum generator as default backup content, by @Cyan4973 +build: cmake improvements by @terrelln, @sighingnow, @gjasny, @JohanMabille, @Saverio976, @gruenich, @teo-tsirpanis +build: bazel support, by @jondo2010 +build: fix cross-compiling for AArch64 with lld by @jcelerier +build: fix Apple platform compatibility, by @nidhijaju +build: fix Visual 2012 and lower compatibility, by @Cyan4973 +build: improve win32 support, by @DimitriPapadopoulos +build: better C90 compliance for zlibWrapper, by @emaste +port: make: fat binaries on macos, by @mredig +port: ARM64EC compatibility for Windows, by @dunhor +port: QNX support by @klausholstjacobsen +port: MSYS2 and Cygwin makefile installation and test support, by @QBos07 +port: risc-v support validation in CI, by @Cyan4973 +port: sparc64 support validation in CI, by @Cyan4973 +port: AIX compatibility, by @likema +port: HP-UX compatibility, by @likema +doc: Improved specification accuracy, by @elasota +bug: Fix and deprecate ZSTD_generateSequences (#3981) + +v1.5.5 (Apr 2023) +fix: fix rare corruption bug affecting the high compression mode, reported by @danlark1 (#3517, @terrelln) +perf: improve mid-level compression speed (#3529, #3533, #3543, @yoniko and #3552, @terrelln) +lib: deprecated bufferless block-level API (#3534) by @terrelln +cli: mmap large dictionaries to save memory, by @daniellerozenblit +cli: improve speed of --patch-from mode (~+50%) (#3545) by @daniellerozenblit +cli: improve i/o speed (~+10%) when processing lots of
small files (#3479) by @felixhandte +cli: zstd no longer crashes when requested to write into a write-protected directory (#3541) by @felixhandte +cli: fix decompression into block device using -o, reported by @georgmu (#3583) +build: fix zstd CLI compiled with lzma support but not zlib support (#3494) by @Hello71 +build: fix cmake to no longer require 3.18 as minimum version (#3510) by @kou +build: fix MSVC+ClangCL linking issue (#3569) by @tru +build: fix zstd-dll, version of zstd CLI that links to the dynamic library (#3496) by @yoniko +build: fix MSVC warnings (#3495) by @embg +doc: updated zstd specification to clarify corner cases, by @Cyan4973 +doc: document how to create fat binaries for macos (#3568) by @rickmark +misc: improve seekable format ingestion speed (~+100%) for very small chunk sizes (#3544) by @Cyan4973 +misc: tests/fullbench can benchmark multiple files (#3516) by @dloidolt + +v1.5.4 (Feb 2023) +perf: +20% faster huffman decompression for targets that can't compile x64 assembly (#3449, @terrelln) +perf: up to +10% faster streaming compression at levels 1-2 (#3114, @embg) +perf: +4-13% for levels 5-12 by optimizing function generation (#3295, @terrelln) +perf: +3-11% compression speed for `arm` target (#3199, #3164, #3145, #3141, #3138, @JunHe77 and #3139, #3160, @danlark1) +perf: +5-30% faster dictionary compression at levels 1-4 (#3086, #3114, #3152, @embg) +perf: +10-20% cold dict compression speed by prefetching CDict tables (#3177, @embg) +perf: +1% faster compression by removing a branch in ZSTD_fast_noDict (#3129, @felixhandte) +perf: Small compression ratio improvements in high compression mode (#2983, #3391, @Cyan4973 and #3285, #3302, @daniellerozenblit) +perf: small speed improvement by better detecting `STATIC_BMI2` for `clang` (#3080, @TocarIP) +perf: Improved streaming performance when `ZSTD_c_stableInBuffer` is set (#2974, @Cyan4973) +cli: Asynchronous I/O for improved cli speed (#2975, #2985, #3021, #3022, @yoniko) +cli: Change `zstdless` behavior to align with `zless` (#2909, @binhdvo) +cli: Keep original file if `-c` or `--stdout` is given (#3052, @dirkmueller) +cli: Keep original files when result is concatenated into a single output with `-o` (#3450, @Cyan4973) +cli: Preserve Permissions and Ownership of regular files (#3432, @felixhandte) +cli: Print zlib/lz4/lzma library versions with `-vv` (#3030, @terrelln) +cli: Print checksum value for single frame files with `-lv` (#3332, @Cyan4973) +cli: Print `dictID` when present with `-lv` (#3184, @htnhan) +cli: when `stderr` is *not* the console, disable status updates, but preserve final summary (#3458, @Cyan4973) +cli: support `--best` and `--no-name` in `gzip` compatibility mode (#3059, @dirkmueller) +cli: support for `posix` high resolution timer `clock_gettime()`, for improved benchmark accuracy (#3423, @Cyan4973) +cli: improved help/usage (`-h`, `-H`) formatting (#3094, @dirkmueller and #3385, @jonpalmisc) +cli: Better handling of bogus numeric values (#3268, @ctkhanhly) +cli: Fix case where input consists of multiple files _and_ `stdin` (#3222, @yoniko) +cli: Fix tiny files passthrough (#3215, @cgbur) +cli: Fix for `-r` on empty directory (#3027, @brailovich) +cli: Fix empty string as argument for `--output-dir-*` (#3220, @embg) +cli: Fix decompression memory usage reported by `-vv --long` (#3042, @u1f35c, and #3232, @zengyijing) cli: Fix infinite loop when empty input is passed to trainer (#3081, @terrelln) -cli: Implement more gzip compatibility (#3059, @dirkmueller) -cli: Keep original file if -c or
--stdout is given (#3052, @dirkmueller) +cli: Fix `--adapt` not working when `--no-progress` is also set (#3354, @terrelln) +api: Support for Block-Level Sequence Producer (#3333, @embg) +api: Support for in-place decompression (#3432, @terrelln) +api: New `ZSTD_CCtx_setCParams()` function, set all parameters defined in a `ZSTD_compressionParameters` structure (#3403, @Cyan4973) +api: Streaming decompression detects incorrect header ID sooner (#3175, @Cyan4973) +api: Window size resizing optimization for edge case (#3345, @daniellerozenblit) +api: More accurate error codes for busy-loop scenarios (#3413, #3455, @Cyan4973) +api: Fix limit overflow in `compressBound` and `decompressBound` (#3362, #3373, @Cyan4973) reported by @nigeltao +api: Deprecate several advanced experimental functions: streaming (#3408, @embg), copy (#3196, @mileshu) +bug: Fix corruption that rarely occurs in 32-bit mode with wlog=25 (#3361, @terrelln) bug: Fix for block-splitter (#3033, @Cyan4973) bug: Fixes for Sequence Compression API (#3023, #3040, @Cyan4973) bug: Fix leaking thread handles on Windows (#3147, @animalize) bug: Fix timing issues with cmake/meson builds (#3166, #3167, #3170, @Cyan4973) build: Allow user to select legacy level for cmake (#3050, @shadchin) build: Enable legacy support by default in cmake (#3079, @niamster) -build: Meson improvements (#3039, #3122, @eli-schwartz) +build: Meson build script improvements (#3039, #3120, #3122, #3327, #3357, @eli-schwartz and #3276, @neheb) build: Add aarch64 to supported architectures for zstd_trace (#3054, @ooosssososos) -doc: Split help in long and short version, cleanup formatting (#3094, @dirkmueller) -doc: Updated man page, providing more details for --train mode (#3112, @Cyan4973) +build: support AIX architecture (#3219, @qiongsiwu) +build: Fix `ZSTD_LIB_MINIFY` build macro, which now reduces static library size by half (#3366, @terrelln) +build: Fix Windows issues with Multithreading translation layer (#3364, #3380, @yoniko) and ARM64 target (#3320, @cwoffenden) +build: Fix `cmake` script (#3382, #3392, @terrelln and #3252, @Tachi107 and #3167, @Cyan4973) +doc: Updated man page, providing more details for `--train` mode (#3112, @Cyan4973) doc: Add decompressor errata document (#3092, @terrelln) misc: Enable Intel CET (#2992, #2994, @hjl-tools) -misc: Streaming decompression can detect incorrect header ID sooner (#3175, @Cyan4973) +misc: Fix `contrib/` seekable format (#3058, @yhoogstrate and #3346, @daniellerozenblit) +misc: Improve speed of the one-file library generator (#3241, @wahern and #3005, @cwoffenden) + +v1.5.3 (dev version, unpublished) v1.5.2 (Jan, 2022) perf: Regain Minimal memset()-ing During Reuse of Compression Contexts (@Cyan4973, #2969) @@ -54,7 +165,7 @@ build: support for m68k (Motorola 68000's), by @cyan4973 build: improved AIX support, by @Helflym build: improved meson unofficial build, by @eli-schwartz cli : custom memory limit when training dictionary (#2925), by @embg -cli : report advanced parameters information when compressing in very verbose mode (``-vv`), by @Svetlitski-FB +cli : report advanced parameters information when compressing in very verbose mode (`-vv`), by @Svetlitski-FB v1.5.0 (May 11, 2021) api: Various functions promoted from experimental to stable API: (#2579-2581, @senhuang42) @@ -121,7 +232,7 @@ api: Add Function to Generate Skippable Frame (#2439, @senhuang42) perf: New Algorithms for the Long Distance Matcher (#2483, @mpu) perf: Performance Improvements for Long Distance Matcher (#2464, @mpu) perf: Don't
Shrink Window Log when Streaming with a Dictionary (#2451, @terrelln) -cli: Fix `--output-dir-mirror`'s Rejection of `..`-Containing Paths (#2512, @felixhandte) +cli: Fix `--output-dir-mirror` rejection of `..`-containing paths (#2512, @felixhandte) cli: Allow Input From Console When `-f`/`--force` is Passed (#2466, @felixhandte) cli: Improve Help Message (#2500, @senhuang42) tests: Remove Flaky Tests (#2455, #2486, #2445, @Cyan4973) @@ -408,7 +519,7 @@ misc: added /contrib/docker script by @gyscos v1.3.3 (Dec 21, 2017) perf: faster zstd_opt strategy (levels 16-19) -fix : bug #944 : multithreading with shared ditionary and large data, reported by @gsliepen +fix : bug #944 : multithreading with shared dictionary and large data, reported by @gsliepen cli : fix : content size written in header by default cli : fix : improved LZ4 format support, by @felixhandte cli : new : hidden command `-S`, to benchmark multiple files while generating one result per file diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 00000000000..10fef39b909 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.10) + +# Thin wrapper so `cmake -S .` behaves like `cmake -S build/cmake`. +# Policy lives in build/cmake; keep parent project language-less. +project(zstd-superbuild LANGUAGES NONE) + +if(CMAKE_SOURCE_DIR STREQUAL CMAKE_BINARY_DIR) + message(FATAL_ERROR "In-source builds are not supported. Specify a separate build directory with -B.") +endif() + +add_subdirectory(build/cmake) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f5e747ae1b9..a9ad99ebdab 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -60,7 +60,7 @@ Our contribution process works in three main stages: * Note: run local tests to ensure that your changes didn't break existing functionality * Quick check ``` - make shortest + make check ``` * Longer check ``` @@ -171,8 +171,8 @@ who want earlier signal. | Cirrus CI | Used for testing on FreeBSD | https://github.com/marketplace/cirrus-ci/ | `.cirrus.yml` | | Circle CI | Historically was used to provide faster signal,
but we may be able to migrate these to GitHub Actions | https://circleci.com/docs/2.0/getting-started/#setting-up-circleci
https://youtu.be/Js3hMUsSZ2c
https://circleci.com/docs/2.0/enable-checks/ | `.circleci/config.yml` | -Note: the instructions linked above mostly cover how to set up a repository with CI from scratch. -The general idea should be the same for setting up CI on your fork of zstd, but you may have to +Note: the instructions linked above mostly cover how to set up a repository with CI from scratch. +The general idea should be the same for setting up CI on your fork of zstd, but you may have to follow slightly different steps. In particular, please ignore any instructions related to setting up config files (since zstd already has configs for each of these services). @@ -216,7 +216,7 @@ will typically not be stable enough to obtain reliable benchmark results. If you can get your hands on a desktop, this is usually a better scenario. Of course, benchmarking can be done on non-hyper-stable machines as well. You will just have to -do a little more work to ensure that you are in fact measuring the changes you've made not and +do a little more work to ensure that you are in fact measuring the changes you've made and not noise. Here are some things you can do to make your benchmarks more stable: 1. The most simple thing you can do to drastically improve the stability of your benchmark is @@ -373,7 +373,12 @@ counter `L1-dcache-load-misses` #### Visual Studio -TODO +Build Zstd with symbols first (for example `cmake -B build -S build/cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo && ninja -C build zstd`) so the profiler resolves call stacks. + +* Launch Visual Studio’s Performance Profiler (`Alt+F2`), enable CPU Usage (optionally Instrumentation), and point it at the `programs/zstd` benchmark you want to run (for example, a built-in benchmark run such as `zstd -b19 -i5 <file>`). +* If you prefer to start the benchmark from a terminal, use “Attach to running process” to latch onto it mid-run; keep frame pointers (`-fno-omit-frame-pointer`) for clean stacks. +* When you stop the capture, review the call tree, hot path, and annotated source panes. +* Microsoft’s [Performance Profiling docs](https://learn.microsoft.com/en-us/visualstudio/profiling/?view=vs-2022) cover deeper sampling, ETW, and collection options if required. ## Issues We use GitHub issues to track public bugs. Please ensure your description is diff --git a/LICENSE b/LICENSE index a793a802892..75800288cc2 100644 --- a/LICENSE +++ b/LICENSE @@ -2,7 +2,7 @@ BSD License For Zstandard software -Copyright (c) 2016-present, Facebook, Inc. All rights reserved. +Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -14,9 +14,9 @@ are permitted provided that the following conditions are met: this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. + * Neither the name Facebook, nor Meta, nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED diff --git a/Makefile b/Makefile index 429c90ff1f4..b726f7970ce 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-2021, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -85,14 +85,10 @@ test: $(MAKE) -C $(TESTDIR) $@ ZSTD=../../programs/zstd $(MAKE) -C doc/educational_decoder $@ -## shortest: same as `make check` -.PHONY: shortest -shortest: - $(Q)$(MAKE) -C $(TESTDIR) $@ - ## check: run basic tests for `zstd` cli .PHONY: check -check: shortest +check: + $(Q)$(MAKE) -C $(TESTDIR) $@ .PHONY: automated_benchmarking automated_benchmarking: @@ -123,6 +119,7 @@ contrib: lib $(MAKE) -C contrib/seekable_format/examples all $(MAKE) -C contrib/seekable_format/tests test $(MAKE) -C contrib/largeNbDicts all + $(MAKE) -C contrib/externalSequenceProducer all cd build/single_file_libs/ ; ./build_decoder_test.sh cd build/single_file_libs/ ; ./build_library_test.sh @@ -142,14 +139,18 @@ clean: $(Q)$(MAKE) -C contrib/seekable_format/examples $@ > $(VOID) $(Q)$(MAKE) -C contrib/seekable_format/tests $@ > $(VOID) $(Q)$(MAKE) -C contrib/largeNbDicts $@ > $(VOID) + $(Q)$(MAKE) -C contrib/externalSequenceProducer $@ > $(VOID) $(Q)$(RM) zstd$(EXT) zstdmt$(EXT) tmp* - $(Q)$(RM) -r lz4 + $(Q)$(RM) -r lz4 cmakebuild mesonbuild install @echo Cleaning completed +LIBZSTD_MK_DIR = $(ZSTDDIR) +include $(LIBZSTD_MK_DIR)/install_oses.mk # UNAME, INSTALL_OS_LIST + #------------------------------------------------------------------------------ # make install is validated only for Linux, macOS, Hurd and some BSD targets #------------------------------------------------------------------------------ -ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD DragonFly NetBSD MSYS_NT Haiku AIX)) +ifneq (,$(filter $(INSTALL_OS_LIST),$(UNAME))) HOST_OS = POSIX @@ -195,18 +196,27 @@ uninstall: travis-install: $(MAKE) install PREFIX=~/install_test_dir +.PHONY: clangbuild-darwin-fat +clangbuild-darwin-fat: clean + clang -v + CXX=clang++ CC=clang CFLAGS+="-Werror -Wconversion -Wno-sign-conversion -Wdocumentation -arch arm64" $(MAKE) zstd-release + mv programs/zstd programs/zstd_arm64 + CXX=clang++ CC=clang CFLAGS+="-Werror -Wconversion -Wno-sign-conversion -Wdocumentation -arch x86_64" $(MAKE) zstd-release + mv programs/zstd programs/zstd_x64 + lipo -create programs/zstd_x64 programs/zstd_arm64 -output programs/zstd + .PHONY: gcc5build gcc6build gcc7build clangbuild m32build armbuild aarch64build ppcbuild ppc64build gcc5build: clean gcc-5 -v - CC=gcc-5 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-5 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" gcc6build: clean gcc-6 -v - CC=gcc-6 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-6 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" gcc7build: clean gcc-7 -v - CC=gcc-7 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-7 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" clangbuild: clean clang -v @@ -230,17 +240,17 @@ ppc64build: clean .PHONY: armfuzz aarch64fuzz ppcfuzz ppc64fuzz armfuzz: clean - CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static $(MOREFLAGS)" 
FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest aarch64fuzz: clean ld -v - CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest ppcfuzz: clean - CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest ppc64fuzz: clean - CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest .PHONY: cxxtest gcc5test gcc6test armtest aarch64test ppctest ppc64test cxxtest: CXXFLAGS += -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror @@ -249,49 +259,49 @@ cxxtest: clean gcc5test: clean gcc-5 -v - $(MAKE) all CC=gcc-5 MOREFLAGS="-Werror" + $(MAKE) all CC=gcc-5 MOREFLAGS="-Werror $(MOREFLAGS)" gcc6test: clean gcc-6 -v - $(MAKE) all CC=gcc-6 MOREFLAGS="-Werror" + $(MAKE) all CC=gcc-6 MOREFLAGS="-Werror $(MOREFLAGS)" armtest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" aarch64test: $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" ppctest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" ppc64test: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" .PHONY: arm-ppc-compilation arm-ppc-compilation: - $(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" - $(MAKE) -C $(PRGDIR) clean zstd CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" - $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" - $(MAKE) -C $(PRGDIR) clean zstd 
CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" + $(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static $(MOREFLAGS)" regressiontest: $(MAKE) -C $(FUZZDIR) regressiontest uasanregressiontest: - $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=address,undefined" CXXFLAGS="-O3 -fsanitize=address,undefined" + $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=address,undefined -Werror" CXXFLAGS="-O3 -fsanitize=address,undefined -Werror" msanregressiontest: - $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=memory" CXXFLAGS="-O3 -fsanitize=memory" + $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=memory -Werror" CXXFLAGS="-O3 -fsanitize=memory -Werror" update_regressionResults : REGRESS_RESULTS_DIR := /tmp/regress_results_dir/ update_regressionResults: - $(MAKE) -C programs zstd - $(MAKE) -C tests/regression test - $(RM) -rf $(REGRESS_RESULTS_DIR) + $(MAKE) -j -C programs zstd + $(MAKE) -j -C tests/regression test + $(RM) -r $(REGRESS_RESULTS_DIR) $(MKDIR) $(REGRESS_RESULTS_DIR) ./tests/regression/test \ --cache tests/regression/cache \ @@ -306,31 +316,32 @@ update_regressionResults: # run UBsan with -fsanitize-recover=pointer-overflow # this only works with recent compilers such as gcc 8+ usan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=undefined -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=undefined -Werror $(MOREFLAGS)" asan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -Werror $(MOREFLAGS)" asan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address -Werror" $(MAKE) -C $(TESTDIR) $* + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* msan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer -Werror" HAVE_LZMA=0 # datagen.c fails this test for no obvious reason + $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer -Werror $(MOREFLAGS)" HAVE_LZMA=0 # datagen.c fails this test for no obvious reason -msan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer -Werror" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) HAVE_LZMA=0 $* +msan-%: + $(MAKE) clean + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer -Werror $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -j -C $(TESTDIR) HAVE_LZMA=0 $* asan32: clean - $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address" + $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address $(MOREFLAGS)" uasan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all 
-fsanitize-recover=pointer-overflow -fsanitize=address,undefined -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address,undefined -Werror $(MOREFLAGS)" uasan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=address,undefined -Werror" $(MAKE) -C $(TESTDIR) $* + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address,undefined -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* tsan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread -Werror" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS=--no-big-tests + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" .PHONY: apt-install apt-install: @@ -343,7 +354,7 @@ apt-add-repo: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test sudo apt-get update -y -qq -.PHONY: ppcinstall arminstall valgrindinstall libc6install gcc6install gcc7install gcc8install gpp6install clang38install lz4install +.PHONY: ppcinstall arminstall valgrindinstall libc6install gcc6install gcc7install gcc8install gpp6install clang38install ppcinstall: APT_PACKAGES="qemu-system-ppc qemu-user-static gcc-powerpc-linux-gnu" $(MAKE) apt-install @@ -371,38 +382,56 @@ gpp6install: apt-add-repo clang38install: APT_PACKAGES="clang-3.8" $(MAKE) apt-install -# Ubuntu 14.04 ships a too-old lz4 -lz4install: - [ -e lz4 ] || git clone https://github.com/lz4/lz4 && sudo $(MAKE) -C lz4 install - endif -CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_ZLIB_SUPPORT:BOOL=ON -DZSTD_LZMA_SUPPORT:BOOL=ON -DCMAKE_BUILD_TYPE=Release - -ifneq (,$(filter MSYS%,$(shell uname))) +ifneq (,$(filter MSYS%,$(shell sh -c 'MSYSTEM="MSYS" uname') )) HOST_OS = MSYS -CMAKE_PARAMS = -G"MSYS Makefiles" -DCMAKE_BUILD_TYPE=Debug -DZSTD_MULTITHREAD_SUPPORT:BOOL=OFF -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON endif #------------------------------------------------------------------------ # target specific tests #------------------------------------------------------------------------ -ifneq (,$(filter $(HOST_OS),MSYS POSIX)) -.PHONY: cmakebuild c89build gnu90build c99build gnu99build c11build bmix64build bmix32build bmi32build staticAnalyze -cmakebuild: - cmake --version - $(RM) -r $(BUILDIR)/cmake/build - $(MKDIR) $(BUILDIR)/cmake/build - cd $(BUILDIR)/cmake/build; cmake -DCMAKE_INSTALL_PREFIX:PATH=~/install_test_dir $(CMAKE_PARAMS) .. 
- $(MAKE) -C $(BUILDIR)/cmake/build -j4; - $(MAKE) -C $(BUILDIR)/cmake/build install; - $(MAKE) -C $(BUILDIR)/cmake/build uninstall; - cd $(BUILDIR)/cmake/build; ctest -V -L Medium +ifneq (,$(filter MSYS POSIX,$(HOST_OS))) + +CMAKE ?= cmake +CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_ZLIB_SUPPORT:BOOL=ON -DZSTD_LZMA_SUPPORT:BOOL=ON +ifneq (,$(filter MSYS%,$(shell sh -c 'MSYSTEM="MSYS" uname'))) +CMAKE_PARAMS = -G"MSYS Makefiles" -DZSTD_MULTITHREAD_SUPPORT:BOOL=OFF -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON +endif + +.PHONY: cmakebuild +cmakebuild: + $(CMAKE) --version + $(RM) -r cmakebuild install + $(MKDIR) cmakebuild install + cd cmakebuild; $(CMAKE) -Wdev -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS="-Werror -O0" -DCMAKE_INSTALL_PREFIX=install $(CMAKE_PARAMS) ../build/cmake + $(CMAKE) --build cmakebuild --target install -- -j V=1 + cd cmakebuild; ctest -V -L Medium + +MESON ?= meson +NINJA ?= ninja + +.PHONY: mesonbuild +mesonbuild: + $(MESON) setup \ + --buildtype=debugoptimized \ + -Db_lundef=false \ + -Dauto_features=enabled \ + -Dbin_programs=true \ + -Dbin_tests=true \ + -Dbin_contrib=true \ + -Ddefault_library=both \ + build/meson mesonbuild + $(NINJA) -C mesonbuild/ + $(MESON) test -C mesonbuild/ --print-errorlogs + $(MESON) install -C mesonbuild --destdir staging/ + +.PHONY: c89build gnu90build c99build gnu99build c11build bmix64build bmix32build bmi32build staticAnalyze c89build: clean $(CC) -v - CFLAGS="-std=c89 -Werror -O0" $(MAKE) allmost # will fail, due to missing support for `long long` + CFLAGS="-std=c89 -Werror -Wno-attributes -Wpedantic -Wno-long-long -Wno-variadic-macros -O0" $(MAKE) lib zstd gnu90build: clean $(CC) -v diff --git a/README.md b/README.md index da8622d7276..3aa01a0c1d1 100644 --- a/README.md +++ b/README.md @@ -5,23 +5,20 @@ targeting real-time compression scenarios at zlib-level and better compression r It's backed by a very fast entropy stage, provided by [Huff0 and FSE library](https://github.com/Cyan4973/FiniteStateEntropy). Zstandard's format is stable and documented in [RFC8878](https://datatracker.ietf.org/doc/html/rfc8878). Multiple independent implementations are already available. -This repository represents the reference implementation, provided as an open-source dual [BSD](LICENSE) and [GPLv2](COPYING) licensed **C** library, +This repository represents the reference implementation, provided as an open-source dual [BSD](LICENSE) OR [GPLv2](COPYING) licensed **C** library, and a command line utility producing and decoding `.zst`, `.gz`, `.xz` and `.lz4` files. Should your project require another programming language, -a list of known ports and bindings is provided on [Zstandard homepage](http://www.zstd.net/#other-languages). +a list of known ports and bindings is provided on [Zstandard homepage](https://facebook.github.io/zstd/#other-languages). 
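As a quick, illustrative sketch of this command line utility (file names here are hypothetical; the flags shown are standard `zstd` options), a typical round-trip looks like:

```bash
# compress file.txt into file.txt.zst at the default level (3); the original is kept
zstd file.txt

# higher levels compress more but run slower; -o sets an explicit output name
zstd -19 file.txt -o file.txt.max.zst

# negative ("fast") levels trade compression ratio for speed
zstd --fast=3 file.txt -o file.txt.fast.zst

# decompress to an explicit output, leaving file.txt.zst in place
zstd -d file.txt.zst -o roundtrip.txt
```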
**Development branch status:** [![Build Status][travisDevBadge]][travisLink] -[![Build status][AppveyorDevBadge]][AppveyorLink] [![Build status][CircleDevBadge]][CircleLink] [![Build status][CirrusDevBadge]][CirrusLink] [![Fuzzing Status][OSSFuzzBadge]][OSSFuzzLink] [travisDevBadge]: https://api.travis-ci.com/facebook/zstd.svg?branch=dev "Continuous Integration test suite" [travisLink]: https://travis-ci.com/facebook/zstd -[AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite" -[AppveyorLink]: https://ci.appveyor.com/project/YannCollet/zstd-p0yf0 [CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite" [CircleLink]: https://circleci.com/gh/facebook/zstd [CirrusDevBadge]: https://api.cirrus-ci.com/github/facebook/zstd.svg?branch=dev @@ -32,36 +29,35 @@ a list of known ports and bindings is provided on [Zstandard homepage](http://ww ## Benchmarks For reference, several fast compression algorithms were tested and compared -on a desktop running Ubuntu 20.04 (`Linux 5.11.0-41-generic`), -with a Core i7-9700K CPU @ 4.9GHz, +on a desktop featuring a Core i7-9700K CPU @ 4.9GHz +and running Ubuntu 24.04 (`Linux 6.8.0-53-generic`), using [lzbench], an open-source in-memory benchmark by @inikep -compiled with [gcc] 9.3.0, +compiled with [gcc] 14.2.0, on the [Silesia compression corpus]. [lzbench]: https://github.com/inikep/lzbench -[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia +[Silesia compression corpus]: https://sun.aei.polsl.pl/~sdeor/index.php?page=silesia [gcc]: https://gcc.gnu.org/ | Compressor name | Ratio | Compression| Decompress.| | --------------- | ------| -----------| ---------- | -| **zstd 1.5.1 -1** | 2.887 | 530 MB/s | 1700 MB/s | -| [zlib] 1.2.11 -1 | 2.743 | 95 MB/s | 400 MB/s | -| brotli 1.0.9 -0 | 2.702 | 395 MB/s | 450 MB/s | -| **zstd 1.5.1 --fast=1** | 2.437 | 600 MB/s | 2150 MB/s | -| **zstd 1.5.1 --fast=3** | 2.239 | 670 MB/s | 2250 MB/s | -| quicklz 1.5.0 -1 | 2.238 | 540 MB/s | 760 MB/s | -| **zstd 1.5.1 --fast=4** | 2.148 | 710 MB/s | 2300 MB/s | -| lzo1x 2.10 -1 | 2.106 | 660 MB/s | 845 MB/s | -| [lz4] 1.9.3 | 2.101 | 740 MB/s | 4500 MB/s | -| lzf 3.6 -1 | 2.077 | 410 MB/s | 830 MB/s | -| snappy 1.1.9 | 2.073 | 550 MB/s | 1750 MB/s | - -[zlib]: http://www.zlib.net/ -[lz4]: http://www.lz4.org/ +| **zstd 1.5.7 -1** | 2.896 | 510 MB/s | 1550 MB/s | +| brotli 1.1.0 -1 | 2.883 | 290 MB/s | 425 MB/s | +| [zlib] 1.3.1 -1 | 2.743 | 105 MB/s | 390 MB/s | +| **zstd 1.5.7 --fast=1** | 2.439 | 545 MB/s | 1850 MB/s | +| quicklz 1.5.0 -1 | 2.238 | 520 MB/s | 750 MB/s | +| **zstd 1.5.7 --fast=4** | 2.146 | 665 MB/s | 2050 MB/s | +| lzo1x 2.10 -1 | 2.106 | 650 MB/s | 780 MB/s | +| [lz4] 1.10.0 | 2.101 | 675 MB/s | 3850 MB/s | +| snappy 1.2.1 | 2.089 | 520 MB/s | 1500 MB/s | +| lzf 3.6 -1 | 2.077 | 410 MB/s | 820 MB/s | + +[zlib]: https://www.zlib.net/ +[lz4]: https://lz4.github.io/lz4/ The negative compression levels, specified with `--fast=#`, offer faster compression and decompression speed -at the cost of compression ratio (compared to level 1). +at the cost of compression ratio. Zstd can also offer stronger compression ratios at the cost of compression speed. Speed vs Compression trade-off is configurable by small increments. @@ -124,32 +120,55 @@ Dictionary gains are mostly effective in the first few KB. Then, the compression ## Build instructions -`make` is the officially maintained build system of this project.
-All other build systems are "compatible" and 3rd-party maintained, -they may feature small differences in advanced options. -When your system allows it, prefer using `make` to build `zstd` and `libzstd`. +`make` is the main build system of this project. +It is the reference, and other build systems are periodically updated to stay compatible. +However, small drifts and feature differences can be present, since perfect synchronization is difficult. +For this reason, when your build system allows it, prefer using `make`. ### Makefile -If your system is compatible with standard `make` (or `gmake`), -invoking `make` in root directory will generate `zstd` cli in root directory. -It will also create `libzstd` into `lib/`. +Assuming your system supports standard `make` (or `gmake`), +just invoking `make` in the root directory generates the `zstd` cli at the root, +and also generates `libzstd` into `lib/`. -Other available options include: -- `make install` : create and install zstd cli, library and man pages -- `make check` : create and run `zstd`, test its behavior on local platform +Other standard targets include: +- `make install` : install zstd cli, library and man pages +- `make check` : run `zstd`, test its essential behavior on local platform The `Makefile` follows the [GNU Standard Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html), -allowing staged install, standard flags, directory variables and command variables. +allowing staged install, standard compilation flags, directory variables and command variables. + +For advanced use cases, specialized flags which control binary generation and installation paths are documented +in [`lib/README.md`](lib/README.md#modular-build) for the `libzstd` library +and in [`programs/README.md`](programs/README.md#compilation-variables) for the `zstd` CLI. ### cmake -A `cmake` project generator is provided within `build/cmake`. -It can generate Makefiles or other build scripts -to create `zstd` binary, and `libzstd` dynamic and static libraries. +A `cmake` project generator is available for generating Makefiles or other build scripts +to create the `zstd` binary as well as `libzstd` dynamic and static libraries. +The repository root now contains a minimal `CMakeLists.txt` that forwards to `build/cmake`, +so you can configure the project with a standard `cmake -S .` invocation, +while the historical `cmake -S build/cmake` entry point remains fully supported. + +```bash +cmake -S . -B build-cmake
cmake --build build-cmake +``` By default, `CMAKE_BUILD_TYPE` is set to `Release`. +#### Support for Fat (Universal2) Output + +`zstd` can be built and installed with support for both Apple Silicon (M1/M2) and Intel by using CMake's Universal2 support. +To perform a Fat/Universal2 build and install, use the following commands: + +```bash +cmake -S . -B build-cmake-debug -G Ninja -DCMAKE_OSX_ARCHITECTURES="x86_64;x86_64h;arm64" +cd build-cmake-debug +ninja +sudo ninja install +``` + ### Meson A Meson project is provided within [`build/meson`](build/meson). Follow @@ -172,37 +191,54 @@ You can build and install zstd [vcpkg](https://github.com/Microsoft/vcpkg/) depe The zstd port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. +### Conan + +You can install pre-built binaries for zstd or build it from source using [Conan](https://conan.io/).
Use the following command: + +```bash +conan install --requires="zstd/[*]" --build=missing +``` + +The zstd Conan recipe is kept up to date by Conan maintainers and community contributors. +If the version is out of date, please [create an issue or pull request](https://github.com/conan-io/conan-center-index) on the ConanCenterIndex repository. + ### Visual Studio (Windows) Going into `build` directory, you will find additional possibilities: -- Projects for Visual Studio 2005, 2008 and 2010. +- Projects for Visual Studio 2008 and 2010. + VS2010 project is compatible with VS2012, VS2013, VS2015 and VS2017. - Automated build scripts for Visual compiler by [@KrzysFR](https://github.com/KrzysFR), in `build/VS_scripts`, which will build `zstd` cli and `libzstd` library without any need to open Visual Studio solution. +- It is now recommended to generate Visual Studio solutions from `cmake`. ### Buck You can build the zstd binary via buck by executing: `buck build programs:zstd` from the root of the repo. The output binary will be in `buck-out/gen/programs/`. +### Bazel + +You can integrate zstd into your Bazel project by using the module hosted on the [Bazel Central Repository](https://registry.bazel.build/modules/zstd). + ## Testing -You can run quick local smoke tests by executing the `playTest.sh` script from the `src/tests` directory. -Two env variables `$ZSTD_BIN` and `$DATAGEN_BIN` are needed for the test script to locate the zstd and datagen binary. -For information on CI testing, please refer to TESTING.md +You can run quick local smoke tests by running `make check`. +If you can't use `make`, execute the `playTest.sh` script from the `src/tests` directory. +Two env variables `$ZSTD_BIN` and `$DATAGEN_BIN` are needed for the test script to locate the `zstd` and `datagen` binaries. +For information on CI testing, please refer to `TESTING.md`. ## Status -Zstandard is currently deployed within Facebook. It is used continuously to compress large amounts of data in multiple formats and use cases. -Zstandard is considered safe for production environments. +Zstandard is deployed within Meta and many other large cloud infrastructures, +to compress humongous amounts of data in various formats and use cases. +It is also continuously fuzzed for security issues by Google's [oss-fuzz](https://github.com/google/oss-fuzz/tree/master/projects/zstd) program. ## License -Zstandard is dual-licensed under [BSD](LICENSE) and [GPLv2](COPYING). +Zstandard is dual-licensed under [BSD](LICENSE) OR [GPLv2](COPYING). ## Contributing The `dev` branch is the one where all contributions are merged before reaching `release`. -If you plan to propose a patch, please commit into the `dev` branch, or its own feature branch. Direct commit to `release` are not permitted. For more information, please read [CONTRIBUTING](CONTRIBUTING.md). diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000000..a5f9a7e1fdb --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,15 @@ +# Reporting and Fixing Security Issues + +Please do not open GitHub issues or pull requests - this makes the problem immediately visible to everyone, including malicious actors. Security issues in this open source project can be safely reported via the Meta Bug Bounty program: + +https://www.facebook.com/whitehat + +Meta's security team will triage your report and determine whether or not it is eligible for a bounty under our program.
+ +# Receiving Vulnerability Notifications + +In the case that a significant security vulnerability is reported to us or discovered by us---without being publicly known---we will, at our discretion, notify high-profile, high-exposure users of Zstandard ahead of our public disclosure of the issue and associated fix. + +If you believe your project would benefit from inclusion in this list, please reach out to one of the maintainers. + + diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index c58ef91a1f2..00000000000 --- a/appveyor.yml +++ /dev/null @@ -1,205 +0,0 @@ -# Following tests are run _only_ on `release` branch -# and on selected feature branch named `appveyorTest` or `visual*` - -- - version: 1.0.{build} - branches: - only: - - release - - master - - /appveyor*/ - - /visual*/ - environment: - matrix: - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x64" - SCRIPT: "make allzstd MOREFLAGS=-static" - ARTIFACT: "true" - BUILD: "true" - - COMPILER: "gcc" - HOST: "mingw" - PLATFORM: "x86" - SCRIPT: "make allzstd MOREFLAGS=-static" - ARTIFACT: "true" - BUILD: "true" - - - COMPILER: "clang-cl" - HOST: "cmake-visual" - PLATFORM: "x64" - CONFIGURATION: "Release" - CMAKE_GENERATOR: "Visual Studio 15 2017" - CMAKE_GENERATOR_PLATFORM: "x64" - CMAKE_GENERATOR_TOOLSET: "LLVM" - APPVEYOR_BUILD_WORKER_IMAGE: "Visual Studio 2017" - - install: - - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% - - SET PATH_ORIGINAL=%PATH% - - if [%HOST%]==[mingw] ( - SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" && - SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" && - COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe && - COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe - ) - - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( - SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" - ) - - build_script: - - if [%HOST%]==[mingw] ( - ( if [%PLATFORM%]==[x64] ( - SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%" - ) else if [%PLATFORM%]==[x86] ( - SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%" - ) ) - ) - - if [%HOST%]==[mingw] if [%BUILD%]==[true] ( - make -v && - sh -c "%COMPILER% -v" && - ECHO Building zlib to static link && - SET "CC=%COMPILER%" && - sh -c "cd .. 
&& git clone --depth 1 --branch v1.2.11 https://github.com/madler/zlib" && - sh -c "cd ../zlib && make -f win32/Makefile.gcc libz.a" - ECHO Building zstd && - SET "CPPFLAGS=-I../../zlib" && - SET "LDFLAGS=../../zlib/libz.a" && - sh -c "%SCRIPT%" && - ( if [%COMPILER%]==[gcc] if [%ARTIFACT%]==[true] - ECHO Creating artifacts && - ECHO %cd% && - lib\dll\example\build_package.bat && - make -C programs DEBUGFLAGS= clean zstd && - cd programs\ && 7z a -tzip -mx9 zstd-win-binary-%PLATFORM%.zip zstd.exe && - appveyor PushArtifact zstd-win-binary-%PLATFORM%.zip && - cp zstd.exe ..\bin\zstd.exe && - git clone --depth 1 --branch release https://github.com/facebook/zstd && - cd zstd && - git archive --format=tar release -o zstd-src.tar && - ..\zstd -19 zstd-src.tar && - appveyor PushArtifact zstd-src.tar.zst && - certUtil -hashfile zstd-src.tar.zst SHA256 > zstd-src.tar.zst.sha256.sig && - appveyor PushArtifact zstd-src.tar.zst.sha256.sig && - cd ..\..\bin\ && - 7z a -tzip -mx9 zstd-win-release-%PLATFORM%.zip * && - appveyor PushArtifact zstd-win-release-%PLATFORM%.zip - ) - ) - - if [%HOST%]==[cmake-visual] ( - ECHO *** && - ECHO *** Building %CMAKE_GENERATOR% ^(%CMAKE_GENERATOR_TOOLSET%^) %PLATFORM%\%CONFIGURATION% && - PUSHD build\cmake && - cmake -DBUILD_TESTING=ON . && - cmake --build . --config %CONFIGURATION% -j4 && - POPD && - ECHO *** - ) - - test_script: - - ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% - - SET "CC=gcc" - - SET "CXX=g++" - - if [%TEST%]==[cmake] ( - mkdir build\cmake\build && - cd build\cmake\build && - SET FUZZERTEST=-T2mn && - SET ZSTREAM_TESTTIME=-T2mn && - cmake -G "Visual Studio 14 2015 Win64" .. && - cd ..\..\.. && - make clean - ) - - -# The following tests are for regular pushes -# into `dev` or some feature branch -# There run less tests, for shorter feedback loop - -- - version: 1.0.{build} - environment: - matrix: - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Debug" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "x64" - CONFIGURATION: "Release" - - COMPILER: "visual" - HOST: "visual" - PLATFORM: "Win32" - CONFIGURATION: "Release" - - - COMPILER: "gcc" - HOST: "cygwin" - PLATFORM: "x64" - - - COMPILER: "clang-cl" - HOST: "cmake-visual" - PLATFORM: "x64" - CONFIGURATION: "Release" - CMAKE_GENERATOR: "Visual Studio 15 2017" - CMAKE_GENERATOR_PLATFORM: "x64" - CMAKE_GENERATOR_TOOLSET: "LLVM" - APPVEYOR_BUILD_WORKER_IMAGE: "Visual Studio 2017" - - install: - - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% - - SET PATH_ORIGINAL=%PATH% - - if [%HOST%]==[cygwin] ( - ECHO Installing Cygwin Packages && - C:\cygwin64\setup-x86_64.exe -qnNdO -R "C:\cygwin64" -g -P ^ - gcc,^ - cmake,^ - make - ) - - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( - SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" - ) - - build_script: - - ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION% - - if [%HOST%]==[cygwin] ( - set CHERE_INVOKING=yes && - set CC=%COMPILER% && - C:\cygwin64\bin\bash --login -c " - set -e; - cd build/cmake; - CFLAGS='-Werror' cmake -G 'Unix Makefiles' -DCMAKE_BUILD_TYPE=Debug -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_FUZZER_FLAGS=-T20s -DZSTD_ZSTREAM_FLAGS=-T20s -DZSTD_FULLBENCH_FLAGS=-i0 .; - make VERBOSE=1 -j; - ctest -V -L 
Medium; - " - ) - - if [%HOST%]==[cmake-visual] ( - ECHO *** && - ECHO *** Building %CMAKE_GENERATOR% ^(%CMAKE_GENERATOR_TOOLSET%^) %PLATFORM%\%CONFIGURATION% && - PUSHD build\cmake && - cmake -DBUILD_TESTING=ON . && - cmake --build . --config %CONFIGURATION% -j4 && - POPD && - ECHO *** - ) - - if [%HOST%]==[visual] ( - ECHO *** && - ECHO *** Building Visual Studio 2012 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && - msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe - ) - - - test_script: - - ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% - - SET "FUZZERTEST=-T10s" - - if [%HOST%]==[mingw] ( - set "CC=%COMPILER%" && - make clean && - make check - ) \ No newline at end of file diff --git a/build/VS2008/zstd/zstd.vcproj b/build/VS2008/zstd/zstd.vcproj index 91f2bda536c..fc87625a950 100644 --- a/build/VS2008/zstd/zstd.vcproj +++ b/build/VS2008/zstd/zstd.vcproj @@ -45,7 +45,7 @@ Name="VCCLCompilerTool" Optimization="0" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress" - PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="3" @@ -122,7 +122,7 @@ EnableIntrinsicFunctions="true" OmitFramePointers="true" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress" - PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" @@ -197,7 +197,7 @@ Name="VCCLCompilerTool" Optimization="0" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress" - PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="3" @@ -275,7 +275,7 @@ EnableIntrinsicFunctions="true" OmitFramePointers="true" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress" - PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" @@ -356,6 +356,10 @@ 
RelativePath="..\..\..\programs\dibio.c" > + + diff --git a/build/VS2008/zstdlib/zstdlib.vcproj b/build/VS2008/zstdlib/zstdlib.vcproj index 88c1aee26d5..61c88f5bcb4 100644 --- a/build/VS2008/zstdlib/zstdlib.vcproj +++ b/build/VS2008/zstdlib/zstdlib.vcproj @@ -45,7 +45,7 @@ Name="VCCLCompilerTool" Optimization="0" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder" - PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="3" @@ -121,7 +121,7 @@ EnableIntrinsicFunctions="true" OmitFramePointers="true" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder" - PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" @@ -195,7 +195,7 @@ Name="VCCLCompilerTool" Optimization="0" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder" - PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="3" @@ -272,7 +272,7 @@ EnableIntrinsicFunctions="true" OmitFramePointers="true" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder" - PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" diff --git a/build/VS2010/datagen/datagen.vcxproj b/build/VS2010/datagen/datagen.vcxproj index a66358a0d3a..aaba4788b9c 100644 --- a/build/VS2010/datagen/datagen.vcxproj +++ b/build/VS2010/datagen/datagen.vcxproj @@ -157,6 +157,8 @@ + + diff --git a/build/VS2010/fullbench-dll/fullbench-dll.vcxproj b/build/VS2010/fullbench-dll/fullbench-dll.vcxproj deleted file mode 100644 index befdc044513..00000000000 --- a/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +++ /dev/null @@ -1,189 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {00000000-1CC8-4FD7-9281-6B8DBB9D3DF8} - Win32Proj - fullbench-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - MultiByte - - - Application - true - MultiByte - - - Application - false - true - MultiByte - - - Application - false - true - MultiByte - - - - - - - - - - - - - - - - - - - true - 
$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(UniversalCRT_IncludePath); - false - - - true - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(UniversalCRT_IncludePath); - false - - - false - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(UniversalCRT_IncludePath); - false - - - false - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(UniversalCRT_IncludePath); - false - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - false - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - libzstd.lib;%(AdditionalDependencies) - false - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - false - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - libzstd.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - libzstd.lib;%(AdditionalDependencies) - false - - - - - Level4 - - - MaxSpeed - true - true - WIN32;_DEBUG;_CONSOLE;ZSTD_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - libzstd.lib;%(AdditionalDependencies) - - - - - - - - - - - - - - - - - - - {00000000-94d5-4bf9-8a50-7bd9929a0850} - - - - - - diff --git a/build/VS2010/fullbench/fullbench.vcxproj b/build/VS2010/fullbench/fullbench.vcxproj index 2e0a042b0e8..c78a72eccdd 100644 --- a/build/VS2010/fullbench/fullbench.vcxproj +++ b/build/VS2010/fullbench/fullbench.vcxproj @@ -170,6 +170,7 @@ + @@ -183,6 +184,7 @@ + diff --git a/build/VS2010/fuzzer/fuzzer.vcxproj b/build/VS2010/fuzzer/fuzzer.vcxproj index 91974ec7fa2..ccbd2517de9 100644 --- a/build/VS2010/fuzzer/fuzzer.vcxproj +++ b/build/VS2010/fuzzer/fuzzer.vcxproj @@ -170,6 +170,7 @@ + diff --git a/build/VS2010/libzstd-dll/libzstd-dll.rc b/build/VS2010/libzstd-dll/libzstd-dll.rc index ee9f5628043..13e8746ffb5 100644 --- a/build/VS2010/libzstd-dll/libzstd-dll.rc +++ b/build/VS2010/libzstd-dll/libzstd-dll.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "libzstd.dll" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." 
VALUE "OriginalFilename", "libzstd.dll" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/build/VS2010/libzstd-dll/libzstd-dll.vcxproj b/build/VS2010/libzstd-dll/libzstd-dll.vcxproj index a0aa897ce01..ddb8e3216f5 100644 --- a/build/VS2010/libzstd-dll/libzstd-dll.vcxproj +++ b/build/VS2010/libzstd-dll/libzstd-dll.vcxproj @@ -34,6 +34,7 @@ + @@ -168,8 +169,7 @@ Level4 Disabled - ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) - true + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL EditAndContinue @@ -188,7 +188,7 @@ Level4 Disabled - ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL @@ -208,7 +208,7 @@ MaxSpeed true true - ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) false MultiThreaded ProgramDatabase @@ -229,7 +229,7 @@ MaxSpeed true true - ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) false false MultiThreaded diff --git a/build/VS2010/libzstd/libzstd.vcxproj b/build/VS2010/libzstd/libzstd.vcxproj index 17c08d70361..09ead245a64 100644 --- a/build/VS2010/libzstd/libzstd.vcxproj +++ b/build/VS2010/libzstd/libzstd.vcxproj @@ -34,6 +34,7 @@ + @@ -161,8 +162,7 @@ Level4 Disabled - ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) - true + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL EditAndContinue @@ -181,7 +181,7 @@ Level4 Disabled - ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL @@ -201,7 +201,7 @@ MaxSpeed true true - ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) false MultiThreaded ProgramDatabase @@ -222,7 +222,7 @@ MaxSpeed true true - ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) false false MultiThreaded diff --git a/build/VS2010/zstd.sln b/build/VS2010/zstd.sln index 12032db44fc..528aae8aa01 100644 --- a/build/VS2010/zstd.sln +++ b/build/VS2010/zstd.sln @@ -7,11 +7,6 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fuzzer", "fuzzer\fuzzer.vcx EndProject 
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench", "fullbench\fullbench.vcxproj", "{61ABD629-1CC8-4FD7-9281-6B8DBB9D3DF8}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench-dll", "fullbench-dll\fullbench-dll.vcxproj", "{00000000-1CC8-4FD7-9281-6B8DBB9D3DF8}" - ProjectSection(ProjectDependencies) = postProject - {00000000-94D5-4BF9-8A50-7BD9929A0850} = {00000000-94D5-4BF9-8A50-7BD9929A0850} - EndProjectSection -EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "datagen", "datagen\datagen.vcxproj", "{037E781E-81A6-494B-B1B3-438AB1200523}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libzstd", "libzstd\libzstd.vcxproj", "{8BFD8150-94D5-4BF9-8A50-7BD9929A0850}" diff --git a/build/VS2010/zstd/zstd.rc b/build/VS2010/zstd/zstd.rc index f5e404730d2..a2118c2df10 100644 --- a/build/VS2010/zstd/zstd.rc +++ b/build/VS2010/zstd/zstd.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "zstd.exe" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." VALUE "OriginalFilename", "zstd.exe" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/build/VS2010/zstd/zstd.vcxproj b/build/VS2010/zstd/zstd.vcxproj index 8ab239dd814..230fd7d09e7 100644 --- a/build/VS2010/zstd/zstd.vcxproj +++ b/build/VS2010/zstd/zstd.vcxproj @@ -35,6 +35,7 @@ + @@ -63,6 +64,7 @@ + @@ -114,6 +116,7 @@ zstd $(SolutionDir)bin\$(Platform)_$(Configuration)\ $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ + NotSet @@ -184,9 +187,10 @@ Level4 Disabled - ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true false + $(InstructionSet) Console @@ -200,9 +204,10 @@ Level4 Disabled - ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true false + $(InstructionSet) Console @@ -218,10 +223,11 @@ MaxSpeed true true - ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) false false MultiThreaded + $(InstructionSet) Console @@ -239,11 +245,12 @@ MaxSpeed true true - ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=5;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=0;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) false false MultiThreaded /DZSTD_MULTITHREAD %(AdditionalOptions) + $(InstructionSet) Console diff --git a/build/VS_scripts/build.VSPreview.cmd b/build/VS_scripts/build.VSPreview.cmd new file mode 100644 index 00000000000..c594bb12c2b --- /dev/null +++ b/build/VS_scripts/build.VSPreview.cmd @@ -0,0 +1,7 @@ +@echo off + +rem build 32-bit +call "%~p0%build.generic.cmd" preview Win32 Release v143 + +rem build 64-bit +call "%~p0%build.generic.cmd" preview x64 Release v143 \ No newline at end of file diff --git a/build/VS_scripts/build.generic.cmd b/build/VS_scripts/build.generic.cmd index b24e6ed4bdb..0912d2347a8 100644 --- 
a/build/VS_scripts/build.generic.cmd +++ b/build/VS_scripts/build.generic.cmd @@ -2,7 +2,7 @@ IF "%1%" == "" GOTO display_help -SETLOCAL +SETLOCAL ENABLEDELAYEDEXPANSION SET msbuild_version=%1 @@ -19,39 +19,34 @@ GOTO build :display_help echo Syntax: build.generic.cmd msbuild_version msbuild_platform msbuild_configuration msbuild_toolset -echo msbuild_version: VS installed version (VS2012, VS2013, VS2015, VS2017, VS2019, ...) +echo msbuild_version: VS installed version (latest, VS2012, VS2013, VS2015, VS2017, VS2019, VS2022, ...) echo msbuild_platform: Platform (x64 or Win32) echo msbuild_configuration: VS configuration (Release or Debug) -echo msbuild_toolset: Platform Toolset (v100, v110, v120, v140, v141, v142, ...) +echo msbuild_toolset: Platform Toolset (v100, v110, v120, v140, v141, v142, v143, ...) EXIT /B 1 :build SET msbuild="%windir%\Microsoft.NET\Framework\v4.0.30319\MSBuild.exe" -SET msbuild_vs2017community="%programfiles(x86)%\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe" -SET msbuild_vs2017professional="%programfiles(x86)%\Microsoft Visual Studio\2017\Professional\MSBuild\15.0\Bin\MSBuild.exe" -SET msbuild_vs2017enterprise="%programfiles(x86)%\Microsoft Visual Studio\2017\Enterprise\MSBuild\15.0\Bin\MSBuild.exe" IF %msbuild_version% == VS2013 SET msbuild="%programfiles(x86)%\MSBuild\12.0\Bin\MSBuild.exe" IF %msbuild_version% == VS2015 SET msbuild="%programfiles(x86)%\MSBuild\14.0\Bin\MSBuild.exe" -IF %msbuild_version% == VS2017Community SET msbuild=%msbuild_vs2017community% -IF %msbuild_version% == VS2017Professional SET msbuild=%msbuild_vs2017professional% -IF %msbuild_version% == VS2017Enterprise SET msbuild=%msbuild_vs2017enterprise% -IF %msbuild_version% == VS2017 ( - IF EXIST %msbuild_vs2017community% SET msbuild=%msbuild_vs2017community% - IF EXIST %msbuild_vs2017professional% SET msbuild=%msbuild_vs2017professional% - IF EXIST %msbuild_vs2017enterprise% SET msbuild=%msbuild_vs2017enterprise% -) - -:: VS2019 -SET msbuild_vs2019community="%programfiles(x86)%\Microsoft Visual Studio\2019\Community\MSBuild\Current\Bin\MSBuild.exe" -SET msbuild_vs2019professional="%programfiles(x86)%\Microsoft Visual Studio\2019\Professional\MSBuild\Current\Bin\MSBuild.exe" -SET msbuild_vs2019enterprise="%programfiles(x86)%\Microsoft Visual Studio\2019\Enterprise\MSBuild\Current\Bin\MSBuild.exe" -IF %msbuild_version% == VS2019 ( - IF EXIST %msbuild_vs2019community% SET msbuild=%msbuild_vs2019community% - IF EXIST %msbuild_vs2019professional% SET msbuild=%msbuild_vs2019professional% - IF EXIST %msbuild_vs2019enterprise% SET msbuild=%msbuild_vs2019enterprise% +IF %msbuild_version% == VS2017 SET vswhere_params=-version [15,16) -products * +IF %msbuild_version% == VS2017Community SET vswhere_params=-version [15,16) -products Community +IF %msbuild_version% == VS2017Enterprise SET vswhere_params=-version [15,16) -products Enterprise +IF %msbuild_version% == VS2017Professional SET vswhere_params=-version [15,16) -products Professional +IF %msbuild_version% == VS2019 SET vswhere_params=-version [16,17) -products * +IF %msbuild_version% == VS2022 SET vswhere_params=-version [17,18) -products * +REM Add the next Visual Studio version here. +IF %msbuild_version% == latest SET vswhere_params=-latest -products * +IF %msbuild_version% == preview SET vswhere_params=-prerelease -products * + +IF NOT DEFINED vswhere_params GOTO skip_vswhere +SET vswhere="%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" +FOR /F "USEBACKQ TOKENS=*" %%F IN (`%vswhere% !vswhere_params! 
-requires Microsoft.Component.MSBuild -find MSBuild\**\Bin\MSBuild.exe`) DO ( + SET msbuild="%%F" ) +:skip_vswhere SET project="%~p0\..\VS2010\zstd.sln" diff --git a/build/cmake/CMakeLists.txt b/build/cmake/CMakeLists.txt index 5e2cf3d5634..7cb0b684378 100644 --- a/build/cmake/CMakeLists.txt +++ b/build/cmake/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -7,204 +7,75 @@ # in the COPYING file in the root directory of this source tree). # ################################################################ -cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR) - -# As of 2018-12-26 ZSTD has been validated to build with cmake version 3.13.2 new policies. -# Set and use the newest cmake policies that are validated to work -set(ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION "3") -set(ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION "13") #Policies never changed at PATCH level -if("${CMAKE_MAJOR_VERSION}" LESS 3) - set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") -elseif( "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}" EQUAL "${CMAKE_MAJOR_VERSION}" AND - "${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}" GREATER "${CMAKE_MINOR_VERSION}") - set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") -else() - set(ZSTD_CMAKE_POLICY_VERSION "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}.${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}.0") -endif() -cmake_policy(VERSION ${ZSTD_CMAKE_POLICY_VERSION}) - -set(CMAKE_BUILD_WITH_INSTALL_RPATH on) +cmake_minimum_required(VERSION 3.10 FATAL_ERROR) +#----------------------------------------------------------------------------- +# Setup CMake environment +#----------------------------------------------------------------------------- +set(CMAKE_BUILD_WITH_INSTALL_RPATH ON) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") + +# Define project paths set(ZSTD_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../..") set(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib) -# Parse version -include(GetZstdLibraryVersion) -GetZstdLibraryVersion(${LIBRARY_DIR}/zstd.h zstd_VERSION_MAJOR zstd_VERSION_MINOR zstd_VERSION_PATCH) -if( CMAKE_MAJOR_VERSION LESS 3 ) - ## Provide cmake 3+ behavior for older versions of cmake - project(zstd) - set(PROJECT_VERSION_MAJOR ${zstd_VERSION_MAJOR}) - set(PROJECT_VERSION_MINOR ${zstd_VERSION_MINOR}) - set(PROJECT_VERSION_PATCH ${zstd_VERSION_PATCH}) - set(PROJECT_VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}") - enable_language(C) # Main library is in C - enable_language(CXX) # Testing contributed code also utilizes CXX -else() - project(zstd - VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}" - LANGUAGES C # Main library is in C - CXX # Testing contributed code also utilizes CXX - ) -endif() -message(STATUS "ZSTD VERSION: ${zstd_VERSION}") -set(zstd_HOMEPAGE_URL "http://www.zstd.net") -set(zstd_DESCRIPTION "Zstandard is a real-time compression algorithm, providing high compression ratios.") +#----------------------------------------------------------------------------- +# Configure CMake policies and version +#----------------------------------------------------------------------------- +include(ZstdVersion) -# Set a default build type if none was specified +#----------------------------------------------------------------------------- +# Project declaration 
+#----------------------------------------------------------------------------- +project(zstd + VERSION "${ZSTD_FULL_VERSION}" + LANGUAGES C # Main library is in C and ASM, ASM is enabled conditionally + HOMEPAGE_URL "${zstd_HOMEPAGE_URL}" + DESCRIPTION "${zstd_DESCRIPTION}" +) + +#----------------------------------------------------------------------------- +# Build type configuration +#----------------------------------------------------------------------------- if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) - message(STATUS "Setting build type to 'Release' as none was specified.") - set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE) - # Set the possible values of build type for cmake-gui - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") + message(STATUS "Setting build type to 'Release' as none was specified.") + set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() -include(GNUInstallDirs) - #----------------------------------------------------------------------------- -# Add extra compilation flags +# Include standard modules #----------------------------------------------------------------------------- -include(AddZstdCompilationFlags) -ADD_ZSTD_COMPILATION_FLAGS() - -# Always hide XXHash symbols -add_definitions(-DXXH_NAMESPACE=ZSTD_) +include(GNUInstallDirs) #----------------------------------------------------------------------------- -# Installation variables +# Display installation information #----------------------------------------------------------------------------- message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}") message(STATUS "CMAKE_INSTALL_LIBDIR: ${CMAKE_INSTALL_LIBDIR}") #----------------------------------------------------------------------------- -# Options +# Configure build options #----------------------------------------------------------------------------- +include(ZstdOptions) -# Legacy support -option(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" ON) - -if (ZSTD_LEGACY_SUPPORT) - message(STATUS "ZSTD_LEGACY_SUPPORT defined!") - set(ZSTD_LEGACY_LEVEL 5 CACHE STRING "") - add_definitions(-DZSTD_LEGACY_SUPPORT=${ZSTD_LEGACY_LEVEL}) -else () - message(STATUS "ZSTD_LEGACY_SUPPORT not defined!") - add_definitions(-DZSTD_LEGACY_SUPPORT=0) -endif () - -if (ANDROID) - set(ZSTD_MULTITHREAD_SUPPORT_DEFAULT OFF) -else() - set(ZSTD_MULTITHREAD_SUPPORT_DEFAULT ON) -endif() - -# Multi-threading support -option(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" ${ZSTD_MULTITHREAD_SUPPORT_DEFAULT}) - -if (ZSTD_MULTITHREAD_SUPPORT) - message(STATUS "ZSTD_MULTITHREAD_SUPPORT is enabled") -else () - message(STATUS "ZSTD_MULTITHREAD_SUPPORT is disabled") -endif () - -option(ZSTD_BUILD_PROGRAMS "BUILD PROGRAMS" ON) -option(ZSTD_BUILD_CONTRIB "BUILD CONTRIB" OFF) - -# Respect the conventional CMake option for enabling tests if it was specified on the first configure -if (BUILD_TESTING) - set(ZSTD_BUILD_TESTS_default ON) -else() - set(ZSTD_BUILD_TESTS_default OFF) -endif() -option(ZSTD_BUILD_TESTS "BUILD TESTS" ${ZSTD_BUILD_TESTS_default}) -if (MSVC) - option(ZSTD_USE_STATIC_RUNTIME "LINK TO STATIC RUN-TIME LIBRARIES" OFF) -endif () - #----------------------------------------------------------------------------- -# External dependencies -#----------------------------------------------------------------------------- -if (ZSTD_MULTITHREAD_SUPPORT AND UNIX) - 
set(THREADS_PREFER_PTHREAD_FLAG ON) - find_package(Threads REQUIRED) - if(CMAKE_USE_PTHREADS_INIT) - set(THREADS_LIBS "${CMAKE_THREAD_LIBS_INIT}") - else() - message(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads") - endif() -endif () +# Configure compilation flags +#----------------------------------------------------------------------------- +include(AddZstdCompilationFlags) +ADD_ZSTD_COMPILATION_FLAGS(ON ZSTD_ENABLE_CXX ON) #----------------------------------------------------------------------------- -# Add source directories +# Configure dependencies #----------------------------------------------------------------------------- -add_subdirectory(lib) - -option(ZSTD_PROGRAMS_LINK_SHARED "PROGRAMS LINK SHARED" OFF) - -if (ZSTD_BUILD_PROGRAMS) - if (NOT ZSTD_BUILD_STATIC AND NOT ZSTD_PROGRAMS_LINK_SHARED) - message(SEND_ERROR "You need to build static library to build zstd CLI") - elseif(NOT ZSTD_BUILD_SHARED AND ZSTD_PROGRAMS_LINK_SHARED) - message(SEND_ERROR "You need to build shared library to build zstd CLI") - endif () - - add_subdirectory(programs) -endif () - -if (ZSTD_BUILD_TESTS) - enable_testing() - if (NOT ZSTD_BUILD_STATIC) - message(SEND_ERROR "You need to build static library to build tests") - endif () - - add_subdirectory(tests) -endif () - -if (ZSTD_BUILD_CONTRIB) - add_subdirectory(contrib) -endif () +include(ZstdDependencies) #----------------------------------------------------------------------------- -# Add clean-all target +# Configure build targets #----------------------------------------------------------------------------- -add_custom_target(clean-all - COMMAND ${CMAKE_BUILD_TOOL} clean - COMMAND rm -rf ${CMAKE_BINARY_DIR}/ -) +include(ZstdBuild) #----------------------------------------------------------------------------- -# Generate Package Config files -# -# This section is based on the boiler plate code from: -# https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#creating-packages -#----------------------------------------------------------------------------- -include(CMakePackageConfigHelpers) -write_basic_package_version_file( - "${CMAKE_CURRENT_BINARY_DIR}/zstdConfigVersion.cmake" - VERSION ${zstd_VERSION} - COMPATIBILITY SameMajorVersion - ) - -# A Package Config file that works from the build directory -export(EXPORT zstdExports - FILE "${CMAKE_CURRENT_BINARY_DIR}/zstdTargets.cmake" - NAMESPACE zstd:: - ) -configure_file(zstdConfig.cmake - "${CMAKE_CURRENT_BINARY_DIR}/zstdConfig.cmake" - COPYONLY - ) - -# A Package Config file that works from the installation directory -set(ConfigPackageLocation ${CMAKE_INSTALL_LIBDIR}/cmake/zstd) -install(EXPORT zstdExports - FILE zstdTargets.cmake - NAMESPACE zstd:: - DESTINATION ${ConfigPackageLocation} - ) -install(FILES - zstdConfig.cmake - "${CMAKE_CURRENT_BINARY_DIR}/zstdConfigVersion.cmake" - DESTINATION ${ConfigPackageLocation} - ) +# Configure package generation +#----------------------------------------------------------------------------- +include(ZstdPackage) diff --git a/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake b/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake index 8d04458c3eb..3e7bcce5974 100644 --- a/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake +++ b/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake @@ -1,7 +1,18 @@ -include(CheckCXXCompilerFlag) include(CheckCCompilerFlag) +if(CMAKE_CXX_COMPILER) + include(CheckCXXCompilerFlag) +endif() -function(EnableCompilerFlag _flag _C _CXX) +if (CMAKE_VERSION 
VERSION_GREATER_EQUAL 3.18)
+    set(ZSTD_HAVE_CHECK_LINKER_FLAG true)
+else ()
+    set(ZSTD_HAVE_CHECK_LINKER_FLAG false)
+endif ()
+if (ZSTD_HAVE_CHECK_LINKER_FLAG)
+    include(CheckLinkerFlag)
+endif()
+
+function(EnableCompilerFlag _flag _C _CXX _LD)
     string(REGEX REPLACE "\\+" "PLUS" varname "${_flag}")
     string(REGEX REPLACE "[^A-Za-z0-9]+" "_" varname "${varname}")
     string(REGEX REPLACE "^_+" "" varname "${varname}")
@@ -12,51 +23,93 @@ function(EnableCompilerFlag _flag _C _CXX)
             set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_flag}" PARENT_SCOPE)
         endif ()
     endif ()
-    if (_CXX)
+    if (_CXX AND CMAKE_CXX_COMPILER)
         CHECK_CXX_COMPILER_FLAG(${_flag} CXX_FLAG_${varname})
         if (CXX_FLAG_${varname})
             set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_flag}" PARENT_SCOPE)
         endif ()
     endif ()
+    if (_LD)
+        # We never add a linker flag with CMake < 3.18. We will
+        # implement a CHECK_LINKER_FLAG()-like feature for CMake < 3.18
+        # or require CMake >= 3.18 when we need to add a required
+        # linker flag in the future.
+        #
+        # We also skip the linker flag check for MSVC compilers (which includes
+        # clang-cl) since currently check_linker_flag() doesn't give correct
+        # results for this configuration,
+        # see: https://gitlab.kitware.com/cmake/cmake/-/issues/22023
+        if (ZSTD_HAVE_CHECK_LINKER_FLAG AND NOT MSVC)
+            CHECK_LINKER_FLAG(C ${_flag} LD_FLAG_${varname})
+        else ()
+            set(LD_FLAG_${varname} false)
+        endif ()
+        if (LD_FLAG_${varname})
+            set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${_flag}" PARENT_SCOPE)
+            set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${_flag}" PARENT_SCOPE)
+        endif ()
+    endif ()
 endfunction()
 
-macro(ADD_ZSTD_COMPILATION_FLAGS)
+macro(ADD_ZSTD_COMPILATION_FLAGS _C _CXX _LD)
+    # We set ZSTD_HAS_NOEXECSTACK if we are certain we've set all the required
+    # compiler flags to mark the stack as non-executable.
+    set(ZSTD_HAS_NOEXECSTACK false)
+
     if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang" OR MINGW) #Not only UNIX but also WIN32 for MinGW
         # It's possible to select the exact standard used for compilation.
         # It's not necessary, but can be employed for specific purposes.
         # Note that zstd source code is compatible with both C++98 and above
         # and C-gnu90 (c90 + long long + variadic macros ) and above
         # EnableCompilerFlag("-std=c++11" false true) # Set C++ compilation to c++11 standard
-        # EnableCompilerFlag("-std=c99" true false)   # Set C compiation to c99 standard
+        # EnableCompilerFlag("-std=c99" true false)   # Set C compilation to c99 standard
        if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND MSVC)
            # clang-cl normally maps -Wall to -Weverything.
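To make the extended helper concrete before the hunk resumes, here is an editor's sketch of a call site; it is not part of the patch, and the probed flags are illustrative only:

```cmake
# Editor's sketch (illustrative flags, not part of the patch).
# Signature: EnableCompilerFlag(<flag> <try-C> <try-C++> <try-linker>)
EnableCompilerFlag("-Wvla" true true false)               # compiler flag, probed for C and C++
EnableCompilerFlag("-Wl,-z,noexecstack" false false true) # linker flag; only applied when
                                                          # CMake >= 3.18 and the compiler is not MSVC
```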
- EnableCompilerFlag("/clang:-Wall" true true) + EnableCompilerFlag("/clang:-Wall" _C _CXX false) else () - EnableCompilerFlag("-Wall" true true) + EnableCompilerFlag("-Wall" _C _CXX false) endif () - EnableCompilerFlag("-Wextra" true true) - EnableCompilerFlag("-Wundef" true true) - EnableCompilerFlag("-Wshadow" true true) - EnableCompilerFlag("-Wcast-align" true true) - EnableCompilerFlag("-Wcast-qual" true true) - EnableCompilerFlag("-Wstrict-prototypes" true false) + EnableCompilerFlag("-Wextra" _C _CXX false) + EnableCompilerFlag("-Wundef" _C _CXX false) + EnableCompilerFlag("-Wshadow" _C _CXX false) + EnableCompilerFlag("-Wcast-align" _C _CXX false) + EnableCompilerFlag("-Wcast-qual" _C _CXX false) + EnableCompilerFlag("-Wstrict-prototypes" _C false false) # Enable asserts in Debug mode if (CMAKE_BUILD_TYPE MATCHES "Debug") - EnableCompilerFlag("-DDEBUGLEVEL=1" true true) + EnableCompilerFlag("-DDEBUGLEVEL=1" _C _CXX false) endif () + # Add noexecstack flags + # LDFLAGS + EnableCompilerFlag("-Wl,-z,noexecstack" false false _LD) + # CFLAGS & CXXFLAGS + EnableCompilerFlag("-Qunused-arguments" _C _CXX false) + EnableCompilerFlag("-Wa,--noexecstack" _C _CXX false) + # NOTE: Using 3 nested ifs because the variables are sometimes + # empty if the condition is false, and sometimes equal to false. + # This implicitly converts them to truthy values. There may be + # a better way to do this, but this reliably works. + if (${LD_FLAG_WL_Z_NOEXECSTACK}) + if (${C_FLAG_WA_NOEXECSTACK}) + if (${CXX_FLAG_WA_NOEXECSTACK}) + # We've succeeded in marking the stack as non-executable + set(ZSTD_HAS_NOEXECSTACK true) + endif() + endif() + endif() elseif (MSVC) # Add specific compilation flags for Windows Visual set(ACTIVATE_MULTITHREADED_COMPILATION "ON" CACHE BOOL "activate multi-threaded compilation (/MP flag)") if (CMAKE_GENERATOR MATCHES "Visual Studio" AND ACTIVATE_MULTITHREADED_COMPILATION) - EnableCompilerFlag("/MP" true true) + EnableCompilerFlag("/MP" _C _CXX false) endif () # UNICODE SUPPORT - EnableCompilerFlag("/D_UNICODE" true true) - EnableCompilerFlag("/DUNICODE" true true) + EnableCompilerFlag("/D_UNICODE" _C _CXX false) + EnableCompilerFlag("/DUNICODE" _C _CXX false) # Enable asserts in Debug mode if (CMAKE_BUILD_TYPE MATCHES "Debug") - EnableCompilerFlag("/DDEBUGLEVEL=1" true true) + EnableCompilerFlag("/DDEBUGLEVEL=1" _C _CXX false) endif () endif () diff --git a/build/cmake/CMakeModules/ZstdBuild.cmake b/build/cmake/CMakeModules/ZstdBuild.cmake new file mode 100644 index 00000000000..ada44a966ba --- /dev/null +++ b/build/cmake/CMakeModules/ZstdBuild.cmake @@ -0,0 +1,42 @@ +# ################################################################ +# ZSTD Build Targets Configuration +# ################################################################ + +# Always build the library first (this defines ZSTD_BUILD_STATIC/SHARED options) +add_subdirectory(lib) + +# Validate build configuration after lib options are defined +if(ZSTD_BUILD_PROGRAMS) + if(NOT ZSTD_BUILD_STATIC AND NOT ZSTD_PROGRAMS_LINK_SHARED) + message(SEND_ERROR "Static library required to build zstd CLI programs") + elseif(NOT ZSTD_BUILD_SHARED AND ZSTD_PROGRAMS_LINK_SHARED) + message(SEND_ERROR "Shared library required to build zstd CLI programs") + endif() +endif() + +if(ZSTD_BUILD_TESTS AND NOT ZSTD_BUILD_STATIC) + message(SEND_ERROR "Static library required to build test suite") +endif() + +# Add programs if requested +if(ZSTD_BUILD_PROGRAMS) + add_subdirectory(programs) +endif() + +# Add tests if requested +if(ZSTD_BUILD_TESTS) 
+ enable_testing() + add_subdirectory(tests) +endif() + +# Add contrib utilities if requested +if(ZSTD_BUILD_CONTRIB) + add_subdirectory(contrib) +endif() + +# Clean-all target for thorough cleanup +add_custom_target(clean-all + COMMAND ${CMAKE_BUILD_TOOL} clean + COMMAND ${CMAKE_COMMAND} -E remove_directory ${CMAKE_BINARY_DIR}/ + COMMENT "Performing complete clean including build directory" +) diff --git a/build/cmake/CMakeModules/ZstdDependencies.cmake b/build/cmake/CMakeModules/ZstdDependencies.cmake new file mode 100644 index 00000000000..4e90c5f1b98 --- /dev/null +++ b/build/cmake/CMakeModules/ZstdDependencies.cmake @@ -0,0 +1,30 @@ +# ################################################################ +# ZSTD Dependencies Configuration +# ################################################################ + +# Function to handle HP-UX thread configuration +function(setup_hpux_threads) + find_package(Threads) + if(NOT Threads_FOUND) + set(CMAKE_USE_PTHREADS_INIT 1 PARENT_SCOPE) + set(CMAKE_THREAD_LIBS_INIT -lpthread PARENT_SCOPE) + set(CMAKE_HAVE_THREADS_LIBRARY 1 PARENT_SCOPE) + set(Threads_FOUND TRUE PARENT_SCOPE) + endif() +endfunction() + +# Configure threading support +if(ZSTD_MULTITHREAD_SUPPORT AND UNIX) + if(CMAKE_SYSTEM_NAME MATCHES "HP-UX") + setup_hpux_threads() + else() + set(THREADS_PREFER_PTHREAD_FLAG ON) + find_package(Threads REQUIRED) + endif() + + if(CMAKE_USE_PTHREADS_INIT) + set(THREADS_LIBS "${CMAKE_THREAD_LIBS_INIT}") + else() + message(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads") + endif() +endif() diff --git a/build/cmake/CMakeModules/ZstdOptions.cmake b/build/cmake/CMakeModules/ZstdOptions.cmake new file mode 100644 index 00000000000..dc7813444e2 --- /dev/null +++ b/build/cmake/CMakeModules/ZstdOptions.cmake @@ -0,0 +1,68 @@ +# ################################################################ +# ZSTD Build Options Configuration +# ################################################################ + +# Legacy support configuration (disabled by default) +option(ZSTD_LEGACY_SUPPORT "Enable legacy format support" OFF) + +if(ZSTD_LEGACY_SUPPORT) + message(STATUS "ZSTD_LEGACY_SUPPORT enabled") + set(ZSTD_LEGACY_LEVEL 5 CACHE STRING "Legacy support level") + add_definitions(-DZSTD_LEGACY_SUPPORT=${ZSTD_LEGACY_LEVEL}) +else() + message(STATUS "ZSTD_LEGACY_SUPPORT disabled") + add_definitions(-DZSTD_LEGACY_SUPPORT=0) +endif() + +# Platform-specific options +if(APPLE) + option(ZSTD_FRAMEWORK "Build as Apple Framework" OFF) +endif() + +# Android-specific configuration +if(ANDROID) + set(ZSTD_MULTITHREAD_SUPPORT_DEFAULT OFF) + # Handle old Android API levels + if((NOT ANDROID_PLATFORM_LEVEL) OR (ANDROID_PLATFORM_LEVEL VERSION_LESS 24)) + message(STATUS "Configuring for old Android API - disabling fseeko/ftello") + add_compile_definitions(LIBC_NO_FSEEKO) + endif() +else() + set(ZSTD_MULTITHREAD_SUPPORT_DEFAULT ON) +endif() + +# Multi-threading support +option(ZSTD_MULTITHREAD_SUPPORT "Enable multi-threading support" ${ZSTD_MULTITHREAD_SUPPORT_DEFAULT}) + +if(ZSTD_MULTITHREAD_SUPPORT) + message(STATUS "Multi-threading support enabled") +else() + message(STATUS "Multi-threading support disabled") +endif() + +# Build component options +option(ZSTD_BUILD_PROGRAMS "Build command-line programs" ON) +option(ZSTD_BUILD_CONTRIB "Build contrib utilities" OFF) +option(ZSTD_PROGRAMS_LINK_SHARED "Link programs against shared library" OFF) + +# Test configuration +if(BUILD_TESTING) + set(ZSTD_BUILD_TESTS_default ON) +else() + set(ZSTD_BUILD_TESTS_default 
OFF) +endif() +option(ZSTD_BUILD_TESTS "Build test suite" ${ZSTD_BUILD_TESTS_default}) + +# MSVC-specific options +if(MSVC) + option(ZSTD_USE_STATIC_RUNTIME "Link to static runtime libraries" OFF) +endif() + +# C++ support (needed for tests) +set(ZSTD_ENABLE_CXX ${ZSTD_BUILD_TESTS}) +if(ZSTD_ENABLE_CXX) + enable_language(CXX) +endif() + +# Set global definitions +add_definitions(-DXXH_NAMESPACE=ZSTD_) diff --git a/build/cmake/CMakeModules/ZstdPackage.cmake b/build/cmake/CMakeModules/ZstdPackage.cmake new file mode 100644 index 00000000000..ed20f3c9775 --- /dev/null +++ b/build/cmake/CMakeModules/ZstdPackage.cmake @@ -0,0 +1,45 @@ +# ################################################################ +# ZSTD Package Configuration +# ################################################################ + +include(CMakePackageConfigHelpers) + +# Generate version file +write_basic_package_version_file( + "${CMAKE_CURRENT_BINARY_DIR}/zstdConfigVersion.cmake" + VERSION ${zstd_VERSION} + COMPATIBILITY SameMajorVersion +) + +# Configure package for installation +set(ConfigPackageLocation ${CMAKE_INSTALL_LIBDIR}/cmake/zstd) + +foreach(target_suffix IN ITEMS "_shared" "_static" "") + if(TARGET "libzstd${target_suffix}") + # Export targets for build directory + export(EXPORT "zstdExports${target_suffix}" + FILE "${CMAKE_CURRENT_BINARY_DIR}/zstdTargets${target_suffix}.cmake" + NAMESPACE zstd:: + ) + # Install exported targets + install(EXPORT "zstdExports${target_suffix}" + FILE "zstdTargets${target_suffix}.cmake" + NAMESPACE zstd:: + DESTINATION ${ConfigPackageLocation} + ) + endif() +endforeach() + +# Configure and install package config file +configure_package_config_file( + zstdConfig.cmake.in + "${CMAKE_CURRENT_BINARY_DIR}/zstdConfig.cmake" + INSTALL_DESTINATION ${ConfigPackageLocation} +) + +# Install config files +install(FILES + "${CMAKE_CURRENT_BINARY_DIR}/zstdConfig.cmake" + "${CMAKE_CURRENT_BINARY_DIR}/zstdConfigVersion.cmake" + DESTINATION ${ConfigPackageLocation} +) diff --git a/build/cmake/CMakeModules/ZstdVersion.cmake b/build/cmake/CMakeModules/ZstdVersion.cmake new file mode 100644 index 00000000000..fceb0ec0c23 --- /dev/null +++ b/build/cmake/CMakeModules/ZstdVersion.cmake @@ -0,0 +1,31 @@ +# ################################################################ +# ZSTD Version Configuration +# ################################################################ + +# Setup CMake policy version +set(ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION "3") +set(ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION "13") + +# Determine appropriate policy version +if("${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}" EQUAL "${CMAKE_MAJOR_VERSION}" AND + "${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}" GREATER "${CMAKE_MINOR_VERSION}") + set(ZSTD_CMAKE_POLICY_VERSION "${CMAKE_VERSION}") +else() + set(ZSTD_CMAKE_POLICY_VERSION "${ZSTD_MAX_VALIDATED_CMAKE_MAJOR_VERSION}.${ZSTD_MAX_VALIDATED_CMAKE_MINOR_VERSION}.0") +endif() + +cmake_policy(VERSION ${ZSTD_CMAKE_POLICY_VERSION}) + +# Parse version from header file +include(GetZstdLibraryVersion) +GetZstdLibraryVersion(${LIBRARY_DIR}/zstd.h zstd_VERSION_MAJOR zstd_VERSION_MINOR zstd_VERSION_PATCH) + +# Set version variables +set(ZSTD_SHORT_VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}") +set(ZSTD_FULL_VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}") + +# Project metadata +set(zstd_HOMEPAGE_URL "https://facebook.github.io/zstd") +set(zstd_DESCRIPTION "Zstandard is a real-time compression algorithm, providing high compression ratios.") + +message(STATUS "ZSTD VERSION: 
${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}")
diff --git a/build/cmake/README.md b/build/cmake/README.md
index a460dd16187..41c8bbb1a65 100644
--- a/build/cmake/README.md
+++ b/build/cmake/README.md
@@ -7,6 +7,15 @@ variables.
 
 ## How to build
 
+You can configure the project from the repository root thanks to the forwarding
+`CMakeLists.txt`:
+```sh
+cmake -S . -B build-cmake
+cmake --build build-cmake
+```
+The historical workflow that starts configuration from `build/cmake` continues
+to work as described below.
+
 As cmake doesn't support a command like `cmake clean`, it's recommended to perform an
 "out of source build". To do this, you can create a new directory and build in it:
 ```sh
@@ -41,6 +50,48 @@ cmake -DZSTD_BUILD_TESTS=ON -DZSTD_LEGACY_SUPPORT=OFF ..
 make
 ```
 
+**Apple Frameworks**
+CMake 3.14 or newer is generally recommended for [iOS-derived platforms](https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html#id27).
+```sh
+cmake -S. -B build-cmake -DZSTD_FRAMEWORK=ON -DCMAKE_SYSTEM_NAME=iOS
+```
+Alternatively, for CMake versions lower than 3.14, you can use the [iOS-CMake](https://github.com/leetal/ios-cmake) toolchain:
+```sh
+cmake -B build -G Xcode -DCMAKE_TOOLCHAIN_FILE= -DPLATFORM=OS64 -DZSTD_FRAMEWORK=ON
+```
+
+### How to use it with CMake FetchContent
+
+All available options are defined in the CMake files under this directory; a
+minimal FetchContent consumer looks like this:
+```cmake
+include(FetchContent)
+
+set(ZSTD_BUILD_STATIC ON)
+set(ZSTD_BUILD_SHARED OFF)
+
+FetchContent_Declare(
+    zstd
+    URL "https://github.com/facebook/zstd/releases/download/v1.5.5/zstd-1.5.5.tar.gz"
+    DOWNLOAD_EXTRACT_TIMESTAMP TRUE
+    SOURCE_SUBDIR build/cmake
+)
+
+FetchContent_MakeAvailable(zstd)
+
+target_link_libraries(
+    ${PROJECT_NAME}
+    PRIVATE
+    libzstd_static
+)
+
+# On Windows and macOS this is needed
+target_include_directories(
+    ${PROJECT_NAME}
+    PRIVATE
+    ${zstd_SOURCE_DIR}/lib
+)
+```
+
 ### referring
 [Looking for a 'cmake clean' command to clear up CMake output](https://stackoverflow.com/questions/9680420/looking-for-a-cmake-clean-command-to-clear-up-cmake-output)
diff --git a/build/cmake/contrib/CMakeLists.txt b/build/cmake/contrib/CMakeLists.txt
index f7631d08ca6..8df2a17b3a8 100644
--- a/build/cmake/contrib/CMakeLists.txt
+++ b/build/cmake/contrib/CMakeLists.txt
@@ -1,5 +1,5 @@
 # ################################################################
-# Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
 # All rights reserved.
 #
 # This source code is licensed under both the BSD-style license (found in the
diff --git a/build/cmake/contrib/gen_html/CMakeLists.txt b/build/cmake/contrib/gen_html/CMakeLists.txt
index 8fdd61131ea..d1ff6c64bba 100644
--- a/build/cmake/contrib/gen_html/CMakeLists.txt
+++ b/build/cmake/contrib/gen_html/CMakeLists.txt
@@ -1,5 +1,5 @@
 # ################################################################
-# Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
 # All rights reserved.
 #
 # This source code is licensed under both the BSD-style license (found in the
diff --git a/build/cmake/contrib/pzstd/CMakeLists.txt b/build/cmake/contrib/pzstd/CMakeLists.txt
index 27af86c88dd..e1c8e0672fe 100644
--- a/build/cmake/contrib/pzstd/CMakeLists.txt
+++ b/build/cmake/contrib/pzstd/CMakeLists.txt
@@ -1,5 +1,5 @@
 # ################################################################
-# Copyright (c) 2016-present, Facebook, Inc.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -18,6 +18,7 @@ set(PZSTD_DIR ${ZSTD_SOURCE_DIR}/contrib/pzstd) include_directories(${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${PZSTD_DIR}) add_executable(pzstd ${PROGRAMS_DIR}/util.c ${PZSTD_DIR}/main.cpp ${PZSTD_DIR}/Options.cpp ${PZSTD_DIR}/Pzstd.cpp ${PZSTD_DIR}/SkippableFrame.cpp) +target_compile_features(pzstd PRIVATE cxx_std_11) set_property(TARGET pzstd APPEND PROPERTY COMPILE_DEFINITIONS "NDEBUG") set_property(TARGET pzstd APPEND PROPERTY COMPILE_OPTIONS "-Wno-shadow") diff --git a/build/cmake/lib/CMakeLists.txt b/build/cmake/lib/CMakeLists.txt index cf7af0f8cd6..82f4f50869c 100644 --- a/build/cmake/lib/CMakeLists.txt +++ b/build/cmake/lib/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -7,50 +7,76 @@ # in the COPYING file in the root directory of this source tree). # ################################################################ -project(libzstd C ASM) +project(libzstd C) # ASM language is conditionally enabled where supported set(CMAKE_INCLUDE_CURRENT_DIR TRUE) option(ZSTD_BUILD_STATIC "BUILD STATIC LIBRARIES" ON) option(ZSTD_BUILD_SHARED "BUILD SHARED LIBRARIES" ON) +option(ZSTD_BUILD_COMPRESSION "BUILD COMPRESSION MODULE" ON) +option(ZSTD_BUILD_DECOMPRESSION "BUILD DECOMPRESSION MODULE" ON) +option(ZSTD_BUILD_DICTBUILDER "BUILD DICTBUILDER MODULE" ON) +option(ZSTD_BUILD_DEPRECATED "BUILD DEPRECATED MODULE" OFF) + +set(ZSTDLIB_VISIBLE "" CACHE STRING "Visibility for ZSTDLIB API") +set(ZSTDERRORLIB_VISIBLE "" CACHE STRING "Visibility for ZSTDERRORLIB_VISIBLE API") +set(ZDICTLIB_VISIBLE "" CACHE STRING "Visibility for ZDICTLIB_VISIBLE API") +set(ZSTDLIB_STATIC_API "" CACHE STRING "Visibility for ZSTDLIB_STATIC_API API") +set(ZDICTLIB_STATIC_API "" CACHE STRING "Visibility for ZDICTLIB_STATIC_API API") + +set_property(CACHE ZSTDLIB_VISIBLE PROPERTY STRINGS "" "hidden" "default" "protected" "internal") +set_property(CACHE ZSTDERRORLIB_VISIBLE PROPERTY STRINGS "" "hidden" "default" "protected" "internal") +set_property(CACHE ZDICTLIB_VISIBLE PROPERTY STRINGS "" "hidden" "default" "protected" "internal") +set_property(CACHE ZSTDLIB_STATIC_API PROPERTY STRINGS "" "hidden" "default" "protected" "internal") +set_property(CACHE ZDICTLIB_STATIC_API PROPERTY STRINGS "" "hidden" "default" "protected" "internal") if(NOT ZSTD_BUILD_SHARED AND NOT ZSTD_BUILD_STATIC) message(SEND_ERROR "You need to build at least one flavor of libzstd") endif() -# Define library directory, where sources and header files are located -include_directories(${LIBRARY_DIR} ${LIBRARY_DIR}/common) - file(GLOB CommonSources ${LIBRARY_DIR}/common/*.c) file(GLOB CompressSources ${LIBRARY_DIR}/compress/*.c) +file(GLOB DecompressSources ${LIBRARY_DIR}/decompress/*.c) if (MSVC) - file(GLOB DecompressSources ${LIBRARY_DIR}/decompress/*.c) add_compile_options(-DZSTD_DISABLE_ASM) else () - file(GLOB DecompressSources ${LIBRARY_DIR}/decompress/*.c ${LIBRARY_DIR}/decompress/*.S) + if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|AMD64.*|x86_64.*|X86_64.*" AND ${ZSTD_HAS_NOEXECSTACK}) + enable_language(ASM) + set(DecompressSources ${DecompressSources} ${LIBRARY_DIR}/decompress/huf_decompress_amd64.S) + else() + add_compile_options(-DZSTD_DISABLE_ASM) + 
endif() endif () file(GLOB DictBuilderSources ${LIBRARY_DIR}/dictBuilder/*.c) +file(GLOB DeprecatedSources ${LIBRARY_DIR}/deprecated/*.c) -set(Sources - ${CommonSources} - ${CompressSources} - ${DecompressSources} - ${DictBuilderSources}) - +file(GLOB PublicHeaders ${LIBRARY_DIR}/*.h) file(GLOB CommonHeaders ${LIBRARY_DIR}/common/*.h) file(GLOB CompressHeaders ${LIBRARY_DIR}/compress/*.h) file(GLOB DecompressHeaders ${LIBRARY_DIR}/decompress/*.h) file(GLOB DictBuilderHeaders ${LIBRARY_DIR}/dictBuilder/*.h) +file(GLOB DeprecatedHeaders ${LIBRARY_DIR}/deprecated/*.h) -set(Headers - ${LIBRARY_DIR}/zstd.h - ${CommonHeaders} - ${CompressHeaders} - ${DecompressHeaders} - ${DictBuilderHeaders}) +set(Sources ${CommonSources}) +set(Headers ${PublicHeaders} ${CommonHeaders}) +if (ZSTD_BUILD_COMPRESSION) + set(Sources ${Sources} ${CompressSources}) + set(Headers ${Headers} ${CompressHeaders}) +endif() +if (ZSTD_BUILD_DECOMPRESSION) + set(Sources ${Sources} ${DecompressSources}) + set(Headers ${Headers} ${DecompressHeaders}) +endif() +if (ZSTD_BUILD_DICTBUILDER) + set(Sources ${Sources} ${DictBuilderSources}) + set(Headers ${Headers} ${DictBuilderHeaders}) +endif() +if (ZSTD_BUILD_DEPRECATED) + set(Sources ${Sources} ${DeprecatedSources}) + set(Headers ${Headers} ${DeprecatedHeaders}) +endif() if (ZSTD_LEGACY_SUPPORT) set(LIBRARY_LEGACY_DIR ${LIBRARY_DIR}/legacy) - include_directories(${LIBRARY_LEGACY_DIR}) set(Sources ${Sources} ${LIBRARY_LEGACY_DIR}/zstd_v01.c @@ -72,32 +98,84 @@ if (ZSTD_LEGACY_SUPPORT) ${LIBRARY_LEGACY_DIR}/zstd_v07.h) endif () -if (MSVC) +if (MSVC AND NOT (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")) set(MSVC_RESOURCE_DIR ${ZSTD_SOURCE_DIR}/build/VS2010/libzstd-dll) set(PlatformDependResources ${MSVC_RESOURCE_DIR}/libzstd-dll.rc) +else() + set(PlatformDependResources) endif () +# Explicitly set the language to C for all files, including ASM files. +# Our assembly expects to be compiled by a C compiler, and is only enabled for +# __GNUC__ compatible compilers. Otherwise all the ASM code is disabled by +# macros. 
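The comment above is the crux of the ASM handling: the GNU-style `.S` file is fed to the C compiler driver, so the C preprocessor runs first and the in-file macros can still disable the assembly. As a standalone editor's illustration (hypothetical target and file names, not part of the patch):

```cmake
# Editor's illustration (hypothetical names, not part of the patch):
# route a GNU-style .S file through the C compiler driver so that the
# preprocessor macros inside the file decide whether any assembly is
# actually emitted.
add_library(asmdemo STATIC demo.c huf_decompress_amd64.S)
set_source_files_properties(huf_decompress_amd64.S PROPERTIES LANGUAGE C)
```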
+if(NOT CMAKE_ASM_COMPILER STREQUAL CMAKE_C_COMPILER)
+    set_source_files_properties(${Sources} PROPERTIES LANGUAGE C)
+endif()
+
+macro (add_definition target var)
+    if (NOT ("${${var}}" STREQUAL ""))
+        target_compile_definitions(${target} PUBLIC "${var}=__attribute__((visibility(\"${${var}}\")))")
+    endif ()
+endmacro ()
+
+# Define directories containing the library's public headers
+set(PUBLIC_INCLUDE_DIRS ${LIBRARY_DIR})
+set(CMAKE_RC_FLAGS "${CMAKE_RC_FLAGS} /I \"${LIBRARY_DIR}\"")
 
 # Split project to static and shared libraries build
-set(library_targets)
 if (ZSTD_BUILD_SHARED)
     add_library(libzstd_shared SHARED ${Sources} ${Headers} ${PlatformDependResources})
-    list(APPEND library_targets libzstd_shared)
+    target_include_directories(libzstd_shared INTERFACE $<BUILD_INTERFACE:${PUBLIC_INCLUDE_DIRS}>)
     if (ZSTD_MULTITHREAD_SUPPORT)
-        set_property(TARGET libzstd_shared APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_MULTITHREAD")
+        target_compile_definitions(libzstd_shared PUBLIC ZSTD_MULTITHREAD)
         if (UNIX)
             target_link_libraries(libzstd_shared ${THREADS_LIBS})
         endif ()
-    endif()
+    endif ()
+    add_definition(libzstd_shared ZSTDLIB_VISIBLE)
+    add_definition(libzstd_shared ZSTDERRORLIB_VISIBLE)
+    add_definition(libzstd_shared ZDICTLIB_VISIBLE)
 endif ()
 if (ZSTD_BUILD_STATIC)
     add_library(libzstd_static STATIC ${Sources} ${Headers})
-    list(APPEND library_targets libzstd_static)
+    target_include_directories(libzstd_static INTERFACE $<BUILD_INTERFACE:${PUBLIC_INCLUDE_DIRS}>)
     if (ZSTD_MULTITHREAD_SUPPORT)
-        set_property(TARGET libzstd_static APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_MULTITHREAD")
+        target_compile_definitions(libzstd_static PUBLIC ZSTD_MULTITHREAD)
         if (UNIX)
             target_link_libraries(libzstd_static ${THREADS_LIBS})
         endif ()
     endif ()
+    add_definition(libzstd_static ZSTDLIB_VISIBLE)
+    add_definition(libzstd_static ZSTDERRORLIB_VISIBLE)
+    add_definition(libzstd_static ZDICTLIB_VISIBLE)
+    add_definition(libzstd_static ZSTDLIB_STATIC_API)
+    add_definition(libzstd_static ZDICTLIB_STATIC_API)
+endif ()
+if (ZSTD_BUILD_SHARED AND NOT ZSTD_BUILD_STATIC)
+    if (NOT BUILD_SHARED_LIBS)
+        message(WARNING "BUILD_SHARED_LIBS is OFF, but ZSTD_BUILD_SHARED is ON and ZSTD_BUILD_STATIC is OFF, which takes precedence, so libzstd is a shared library")
+    endif ()
+    add_library(libzstd INTERFACE)
+    target_link_libraries(libzstd INTERFACE libzstd_shared)
+endif ()
+if (ZSTD_BUILD_STATIC AND NOT ZSTD_BUILD_SHARED)
+    if (BUILD_SHARED_LIBS)
+        message(WARNING "BUILD_SHARED_LIBS is ON, but ZSTD_BUILD_SHARED is OFF and ZSTD_BUILD_STATIC is ON, which takes precedence, so libzstd is a static library")
+    endif ()
+    add_library(libzstd INTERFACE)
+    target_link_libraries(libzstd INTERFACE libzstd_static)
+endif ()
+if (ZSTD_BUILD_SHARED AND ZSTD_BUILD_STATIC)
+    # If both ZSTD_BUILD_SHARED and ZSTD_BUILD_STATIC are set, which is the
+    # default, fall back to using BUILD_SHARED_LIBS to determine whether to
+    # set libzstd to static or shared.
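The precedence described in the comment above is what a parent project sees through the new `libzstd` INTERFACE target. An editor's sketch of the consumer side (hypothetical paths and target names, not part of the patch):

```cmake
# Editor's sketch (hypothetical consumer, not part of the patch).
# With both flavors enabled (the default), BUILD_SHARED_LIBS selects the
# flavor that `libzstd` forwards to.
set(BUILD_SHARED_LIBS OFF)                      # `libzstd` will resolve to libzstd_static
add_subdirectory(third_party/zstd/build/cmake)  # hypothetical vendored location
add_executable(app main.c)                      # hypothetical target
target_link_libraries(app PRIVATE libzstd)      # no need to name a flavor explicitly
```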
+ if (BUILD_SHARED_LIBS) + add_library(libzstd INTERFACE) + target_link_libraries(libzstd INTERFACE libzstd_shared) + else () + add_library(libzstd INTERFACE) + target_link_libraries(libzstd INTERFACE libzstd_static) + endif () endif () # Add specific compile definitions for MSVC project @@ -123,8 +201,28 @@ if (ZSTD_BUILD_SHARED) libzstd_shared PROPERTIES OUTPUT_NAME zstd - VERSION ${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH} + VERSION ${ZSTD_FULL_VERSION} SOVERSION ${zstd_VERSION_MAJOR}) + + if (ZSTD_FRAMEWORK) + set_target_properties( + libzstd_shared + PROPERTIES + FRAMEWORK TRUE + FRAMEWORK_VERSION "${ZSTD_FULL_VERSION}" + PRODUCT_BUNDLE_IDENTIFIER "github.com/facebook/zstd" + XCODE_ATTRIBUTE_INSTALL_PATH "@rpath" + PUBLIC_HEADER "${PublicHeaders}" + OUTPUT_NAME "zstd" + XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "" + XCODE_ATTRIBUTE_CODE_SIGNING_ALLOWED "NO" + XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED "NO" + MACOSX_FRAMEWORK_IDENTIFIER "github.com/facebook/zstd" + MACOSX_FRAMEWORK_BUNDLE_VERSION "${ZSTD_FULL_VERSION}" + MACOSX_FRAMEWORK_SHORT_VERSION_STRING "${ZSTD_SHORT_VERSION}" + MACOSX_RPATH TRUE + RESOURCE ${PublicHeaders}) + endif () endif () if (ZSTD_BUILD_STATIC) @@ -133,6 +231,26 @@ if (ZSTD_BUILD_STATIC) PROPERTIES POSITION_INDEPENDENT_CODE On OUTPUT_NAME ${STATIC_LIBRARY_BASE_NAME}) + + if (ZSTD_FRAMEWORK) + set_target_properties( + libzstd_static + PROPERTIES + FRAMEWORK TRUE + FRAMEWORK_VERSION "${ZSTD_FULL_VERSION}" + PRODUCT_BUNDLE_IDENTIFIER "github.com/facebook/zstd/${STATIC_LIBRARY_BASE_NAME}" + XCODE_ATTRIBUTE_INSTALL_PATH "@rpath" + PUBLIC_HEADER "${PublicHeaders}" + OUTPUT_NAME "${STATIC_LIBRARY_BASE_NAME}" + XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "" + XCODE_ATTRIBUTE_CODE_SIGNING_ALLOWED "NO" + XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED "NO" + MACOSX_FRAMEWORK_IDENTIFIER "github.com/facebook/zstd/${STATIC_LIBRARY_BASE_NAME}" + MACOSX_FRAMEWORK_BUNDLE_VERSION "${ZSTD_FULL_VERSION}" + MACOSX_FRAMEWORK_SHORT_VERSION_STRING "${ZSTD_SHORT_VERSION}" + MACOSX_RPATH TRUE + RESOURCE ${PublicHeaders}) + endif () endif () # pkg-config @@ -148,20 +266,22 @@ configure_file("${LIBRARY_DIR}/libzstd.pc.in" "${CMAKE_CURRENT_BINARY_DIR}/libzs install(FILES "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") # install target -install(FILES - "${LIBRARY_DIR}/zstd.h" - "${LIBRARY_DIR}/zdict.h" - "${LIBRARY_DIR}/zstd_errors.h" - DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}") - -install(TARGETS ${library_targets} - EXPORT zstdExports - INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" - ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" - LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" - RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" - BUNDLE DESTINATION "${CMAKE_INSTALL_BINDIR}" - ) +install(FILES ${PublicHeaders} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}") + +foreach(target_suffix IN ITEMS "_shared" "_static" "") + if(TARGET "libzstd${target_suffix}") + install(TARGETS "libzstd${target_suffix}" + EXPORT "zstdExports${target_suffix}" + INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" + ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" + LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" + RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" + BUNDLE DESTINATION "${CMAKE_INSTALL_BINDIR}" + FRAMEWORK DESTINATION "${CMAKE_INSTALL_LIBDIR}" COMPONENT runtime OPTIONAL + PUBLIC_HEADER DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" + ) + endif() +endforeach() # uninstall target if (NOT TARGET uninstall) diff --git a/build/cmake/programs/CMakeLists.txt 
b/build/cmake/programs/CMakeLists.txt index 28b1e1d166b..5e239e32a3d 100644 --- a/build/cmake/programs/CMakeLists.txt +++ b/build/cmake/programs/CMakeLists.txt @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -32,7 +32,12 @@ if (MSVC) set(PlatformDependResources ${MSVC_RESOURCE_DIR}/zstd.rc) endif () -add_executable(zstd ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/fileio_asyncio.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PROGRAMS_DIR}/zstdcli_trace.c ${PlatformDependResources}) +file(GLOB ZSTD_PROGRAM_SRCS "${PROGRAMS_DIR}/*.c") +if (MSVC AND ZSTD_PROGRAMS_LINK_SHARED) + list(APPEND ZSTD_PROGRAM_SRCS ${LIBRARY_DIR}/common/pool.c ${LIBRARY_DIR}/common/threading.c) +endif () + +add_executable(zstd ${ZSTD_PROGRAM_SRCS}) target_link_libraries(zstd ${PROGRAMS_ZSTD_LINK_TARGET}) if (CMAKE_SYSTEM_NAME MATCHES "(Solaris|SunOS)") target_link_libraries(zstd rt) @@ -75,7 +80,9 @@ if (UNIX) ${CMAKE_CURRENT_BINARY_DIR}/zstdless.1 DESTINATION "${MAN_INSTALL_DIR}") - add_executable(zstd-frugal ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/fileio_asyncio.c) + add_executable(zstd-frugal ${PROGRAMS_DIR}/zstdcli.c + ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c + ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/fileio_asyncio.c) target_link_libraries(zstd-frugal ${PROGRAMS_ZSTD_LINK_TARGET}) set_property(TARGET zstd-frugal APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_NOBENCH;ZSTD_NODICT;ZSTD_NOTRACE") endif () diff --git a/build/cmake/tests/CMakeLists.txt b/build/cmake/tests/CMakeLists.txt index 6c1ea298a1c..56104a4e341 100644 --- a/build/cmake/tests/CMakeLists.txt +++ b/build/cmake/tests/CMakeLists.txt @@ -1,6 +1,6 @@ # ################################################################ # zstd - Makefile -# Copyright (C) Yann Collet 2014-present +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # BSD license @@ -27,7 +27,7 @@ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# You can contact the author at :
-# - zstd homepage : http://www.zstd.net/
+# - zstd homepage : https://facebook.github.io/zstd/
# ################################################################
 
 project(tests)
@@ -50,18 +50,18 @@ set(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs)
 set(TESTS_DIR ${ZSTD_SOURCE_DIR}/tests)
 
 include_directories(${TESTS_DIR} ${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${LIBRARY_DIR}/compress ${LIBRARY_DIR}/dictBuilder)
 
-add_executable(datagen ${PROGRAMS_DIR}/datagen.c ${TESTS_DIR}/datagencli.c)
+add_executable(datagen ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/lorem.c ${TESTS_DIR}/loremOut.c ${TESTS_DIR}/datagencli.c)
 target_link_libraries(datagen libzstd_static)
 
 #
 # fullbench
 #
-add_executable(fullbench ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${TESTS_DIR}/fullbench.c)
+add_executable(fullbench ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/lorem.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${TESTS_DIR}/fullbench.c)
 if (NOT MSVC)
     target_compile_options(fullbench PRIVATE "-Wno-deprecated-declarations")
 endif()
 target_link_libraries(fullbench libzstd_static)
-add_test(NAME fullbench COMMAND fullbench ${ZSTD_FULLBENCH_FLAGS})
+add_test(NAME fullbench COMMAND "$<TARGET_FILE:fullbench>" ${ZSTD_FULLBENCH_FLAGS})
 
 #
 # fuzzer
 #
@@ -73,7 +73,7 @@ endif()
 target_link_libraries(fuzzer libzstd_static)
 AddTestFlagsOption(ZSTD_FUZZER_FLAGS "$ENV{FUZZERTEST} $ENV{FUZZER_FLAGS}" "Semicolon-separated list of flags to pass to the fuzzer test (see `fuzzer -h` for usage)")
-add_test(NAME fuzzer COMMAND fuzzer ${ZSTD_FUZZER_FLAGS})
+add_test(NAME fuzzer COMMAND "$<TARGET_FILE:fuzzer>" ${ZSTD_FUZZER_FLAGS})
 # Disable the timeout since the run time is too long for the default timeout of
 # 1500 seconds and varies considerably between low-end and high-end CPUs.
# set_tests_properties(fuzzer PROPERTIES TIMEOUT 0)
@@ -81,14 +81,14 @@ add_test(NAME fuzzer COMMAND fuzzer ${ZSTD_FUZZER_FLAGS})
 
 #
 # zstreamtest
 #
-add_executable(zstreamtest ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/seqgen.c ${TESTS_DIR}/zstreamtest.c)
+add_executable(zstreamtest ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/seqgen.c ${TESTS_DIR}/zstreamtest.c ${TESTS_DIR}/external_matchfinder.c)
 if (NOT MSVC)
     target_compile_options(zstreamtest PRIVATE "-Wno-deprecated-declarations")
 endif()
 target_link_libraries(zstreamtest libzstd_static)
 AddTestFlagsOption(ZSTD_ZSTREAM_FLAGS "$ENV{ZSTREAM_TESTTIME} $ENV{FUZZER_FLAGS}" "Semicolon-separated list of flags to pass to the zstreamtest test (see `zstreamtest -h` for usage)")
-add_test(NAME zstreamtest COMMAND zstreamtest ${ZSTD_ZSTREAM_FLAGS})
+add_test(NAME zstreamtest COMMAND "$<TARGET_FILE:zstreamtest>" ${ZSTD_ZSTREAM_FLAGS})
 
 #
 # playTests.sh
 #
@@ -96,20 +96,21 @@ add_test(NAME zstreamtest COMMAND zstreamtest ${ZSTD_ZSTREAM_FLAGS})
 AddTestFlagsOption(ZSTD_PLAYTESTS_FLAGS "$ENV{PLAYTESTS_FLAGS}" "Semicolon-separated list of flags to pass to the playTests.sh test")
 add_test(NAME playTests COMMAND sh -c "\"${TESTS_DIR}/playTests.sh\" ${ZSTD_PLAYTESTS_FLAGS}")
-if (ZSTD_BUILD_PROGRAMS)
+find_program(UNAME uname) # Run script only in unix shell environments
+if (ZSTD_BUILD_PROGRAMS AND UNAME)
     set_property(TEST playTests APPEND PROPERTY ENVIRONMENT
         "ZSTD_BIN=$<TARGET_FILE:zstd>"
         "DATAGEN_BIN=$<TARGET_FILE:datagen>"
     )
 else()
-    message(STATUS "Disabling playTests.sh test because ZSTD_BUILD_PROGRAMS is not enabled")
+    message(STATUS "Disabling playTests.sh test because requirements are not met")
     set_tests_properties(playTests PROPERTIES DISABLED YES)
 endif()
 
 # Label the "Medium" set of tests (see TESTING.md)
 set_property(TEST fuzzer zstreamtest playTests APPEND PROPERTY LABELS Medium)
 
-add_executable(paramgrill ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/paramgrill.c)
+add_executable(paramgrill ${PROGRAMS_DIR}/benchfn.c ${PROGRAMS_DIR}/benchzstd.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/lorem.c ${PROGRAMS_DIR}/util.c ${PROGRAMS_DIR}/timefn.c ${TESTS_DIR}/paramgrill.c)
 if (UNIX)
     target_link_libraries(paramgrill libzstd_static m) #m is math library
 else()
diff --git a/build/cmake/zstdConfig.cmake b/build/cmake/zstdConfig.cmake
deleted file mode 100644
index ebbfcc38f6f..00000000000
--- a/build/cmake/zstdConfig.cmake
+++ /dev/null
@@ -1 +0,0 @@
-include("${CMAKE_CURRENT_LIST_DIR}/zstdTargets.cmake")
diff --git a/build/cmake/zstdConfig.cmake.in b/build/cmake/zstdConfig.cmake.in
new file mode 100644
index 00000000000..a719d789973
--- /dev/null
+++ b/build/cmake/zstdConfig.cmake.in
@@ -0,0 +1,13 @@
+@PACKAGE_INIT@
+
+include(CMakeFindDependencyMacro)
+if(@ZSTD_MULTITHREAD_SUPPORT@ AND "@UNIX@")
+  find_dependency(Threads)
+endif()
+
+foreach(lib_suffix IN ITEMS "_shared" "_static")
+  include("${CMAKE_CURRENT_LIST_DIR}/zstdTargets${lib_suffix}.cmake" OPTIONAL)
+endforeach()
+include("${CMAKE_CURRENT_LIST_DIR}/zstdTargets.cmake")
+
+check_required_components("zstd")
diff --git a/build/meson/contrib/pzstd/meson.build b/build/meson/contrib/pzstd/meson.build
index 2c47999fa27..c3ee3d60a0e 100644
--- a/build/meson/contrib/pzstd/meson.build
+++ b/build/meson/contrib/pzstd/meson.build
@@ -18,7 +18,8 @@ pzstd_sources = [join_paths(zstd_rootdir, 'programs/util.c'),
                  join_paths(zstd_rootdir, 'contrib/pzstd/SkippableFrame.cpp')]
 pzstd =
executable('pzstd', pzstd_sources, - cpp_args: [ '-DNDEBUG', '-Wno-shadow', '-Wno-deprecated-declarations' ], + cpp_args: pzstd_warning_flags, include_directories: pzstd_includes, - dependencies: [ libzstd_dep, thread_dep ], + dependencies: [ libzstd_internal_dep, thread_dep ], + override_options: ['b_ndebug=true'], install: true) diff --git a/build/meson/lib/meson.build b/build/meson/lib/meson.build index 467007491f5..81fc313f954 100644 --- a/build/meson/lib/meson.build +++ b/build/meson/lib/meson.build @@ -30,6 +30,7 @@ libzstd_sources = [join_paths(zstd_rootdir, 'lib/common/entropy_common.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_compress_literals.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_compress_sequences.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_compress_superblock.c'), + join_paths(zstd_rootdir, 'lib/compress/zstd_preSplit.c'), join_paths(zstd_rootdir, 'lib/compress/zstdmt_compress.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_fast.c'), join_paths(zstd_rootdir, 'lib/compress/zstd_double_fast.c'), @@ -124,7 +125,11 @@ libzstd = library('zstd', version: zstd_libversion) libzstd_dep = declare_dependency(link_with: libzstd, - include_directories: libzstd_includes) + include_directories: join_paths(zstd_rootdir,'lib')) # Do not expose private headers + +if meson.version().version_compare('>=0.54.0') + meson.override_dependency('libzstd', libzstd_dep) +endif # we link to both: # - the shared library (for public symbols) @@ -134,7 +139,8 @@ libzstd_dep = declare_dependency(link_with: libzstd, # -fvisibility=hidden means those cannot be found if get_option('default_library') == 'static' libzstd_static = libzstd - libzstd_internal_dep = libzstd_dep + libzstd_internal_dep = declare_dependency(link_with: libzstd, + include_directories: libzstd_includes) else if get_option('default_library') == 'shared' libzstd_static = static_library('zstd_objlib', @@ -147,11 +153,13 @@ else if cc_id == compiler_msvc # msvc does not actually support linking to both, but errors out with: # error LNK2005: ZSTD_ already defined in zstd.lib(zstd-1.dll) - libzstd_internal_dep = declare_dependency(link_with: libzstd_static) + libzstd_internal_dep = declare_dependency(link_with: libzstd_static, + include_directories: libzstd_includes) else libzstd_internal_dep = declare_dependency(link_with: libzstd, # the static library must be linked after the shared one - dependencies: declare_dependency(link_with: libzstd_static)) + dependencies: declare_dependency(link_with: libzstd_static), + include_directories: libzstd_includes) endif endif @@ -160,7 +168,7 @@ pkgconfig.generate(libzstd, filebase: 'libzstd', description: 'fast lossless compression algorithm library', version: zstd_libversion, - url: 'http://www.zstd.net/') + url: 'https://facebook.github.io/zstd/') install_headers(join_paths(zstd_rootdir, 'lib/zstd.h'), join_paths(zstd_rootdir, 'lib/zdict.h'), diff --git a/build/meson/meson.build b/build/meson/meson.build index f264760a34c..a9768d43a53 100644 --- a/build/meson/meson.build +++ b/build/meson/meson.build @@ -10,7 +10,7 @@ project('zstd', ['c', 'cpp'], - license: ['BSD', 'GPLv2'], + license: 'BSD-3-Clause OR GPL-2.0-only', default_options : [ # There shouldn't be any need to force a C standard convention for zstd # but in case one would want that anyway, this can be done here. 
@@ -23,8 +23,10 @@ project('zstd', # so this isn't safe #'werror=true' ], - version: 'DUMMY', - meson_version: '>=0.48.0') + version: run_command( + find_program('GetZstdLibraryVersion.py'), '../../lib/zstd.h', + check: true).stdout().strip(), + meson_version: '>=0.50.0') cc = meson.get_compiler('c') cxx = meson.get_compiler('cpp') @@ -45,16 +47,6 @@ compiler_msvc = 'msvc' zstd_version = meson.project_version() -zstd_h_file = join_paths(meson.current_source_dir(), '../../lib/zstd.h') -GetZstdLibraryVersion_py = find_program('GetZstdLibraryVersion.py', native : true) -r = run_command(GetZstdLibraryVersion_py, zstd_h_file) -if r.returncode() == 0 - zstd_version = r.stdout().strip() - message('Project version is now: @0@'.format(zstd_version)) -else - error('Cannot find project version in @0@'.format(zstd_h_file)) -endif - zstd_libversion = zstd_version # ============================================================================= @@ -73,7 +65,6 @@ zstd_docdir = join_paths(zstd_datadir, 'doc', meson.project_name()) # Built-in options use_debug = get_option('debug') -buildtype = get_option('buildtype') default_library_type = get_option('default_library') # Custom options @@ -96,8 +87,13 @@ feature_lz4 = get_option('lz4') # ============================================================================= libm_dep = cc.find_library('m', required: false) -thread_dep = dependency('threads', required: feature_multi_thread) -use_multi_thread = thread_dep.found() +if host_machine_os == os_windows + thread_dep = dependency('', required: false) + use_multi_thread = not feature_multi_thread.disabled() +else + thread_dep = dependency('threads', required: feature_multi_thread) + use_multi_thread = thread_dep.found() +endif # Arguments in dependency should be equivalent to those passed to pkg-config zlib_dep = dependency('zlib', required: feature_zlib) use_zlib = zlib_dep.found() @@ -112,15 +108,23 @@ use_lz4 = lz4_dep.found() add_project_arguments('-DXXH_NAMESPACE=ZSTD_', language: ['c']) +pzstd_warning_flags = [] if [compiler_gcc, compiler_clang].contains(cc_id) common_warning_flags = [ '-Wundef', '-Wshadow', '-Wcast-align', '-Wcast-qual' ] + pzstd_warning_flags = ['-Wno-shadow', '-Wno-deprecated-declarations'] if cc_id == compiler_clang common_warning_flags += ['-Wconversion', '-Wno-sign-conversion', '-Wdocumentation'] endif - cc_compile_flags = cc.get_supported_arguments(common_warning_flags + ['-Wstrict-prototypes']) - cxx_compile_flags = cxx.get_supported_arguments(common_warning_flags) + noexecstack_flags = ['-Wa,--noexecstack' ] + noexecstack_link_flags = ['-Wl,-z,noexecstack'] + cc_compile_flags = cc.get_supported_arguments(common_warning_flags + noexecstack_flags + ['-Wstrict-prototypes']) + cxx_compile_flags = cxx.get_supported_arguments(common_warning_flags + noexecstack_flags) add_project_arguments(cc_compile_flags, language : 'c') add_project_arguments(cxx_compile_flags, language : 'cpp') + cc_link_flags = cc.get_supported_link_arguments(noexecstack_link_flags) + cxx_link_flags = cxx.get_supported_link_arguments(noexecstack_link_flags) + add_project_link_arguments(cc_link_flags, language: 'c') + add_project_link_arguments(cxx_link_flags, language: 'cpp') elif cc_id == compiler_msvc msvc_compile_flags = [ '/D_UNICODE', '/DUNICODE' ] if use_multi_thread @@ -138,7 +142,7 @@ endif subdir('lib') -if bin_programs +if bin_programs or bin_tests subdir('programs') endif diff --git a/build/meson/meson_options.txt b/build/meson/meson_options.txt index accf3fa1035..8d7b8856ec6 100644 --- 
a/build/meson/meson_options.txt +++ b/build/meson/meson_options.txt @@ -10,11 +10,11 @@ # Read guidelines from https://wiki.gnome.org/Initiatives/GnomeGoals/MesonPorting -option('legacy_level', type: 'integer', min: 0, max: 7, value: 5, - description: 'Support any legacy format: 7 to 1 for v0.7+ to v0.1+') +option('legacy_level', type: 'integer', min: 0, max: 7, value: 0, + description: 'Support legacy format: 0=disabled, 1-7=support v0.1+ to v0.7+') option('debug_level', type: 'integer', min: 0, max: 9, value: 1, description: 'Enable run-time debug. See lib/common/debug.h') -option('backtrace', type: 'boolean', value: false, +option('backtrace', type: 'feature', value: 'disabled', description: 'Display a stack backtrace when execution generates a runtime exception') option('static_runtime', type: 'boolean', value: false, description: 'Link to static run-time libraries on MSVC') @@ -27,7 +27,7 @@ option('bin_contrib', type: 'boolean', value: false, description: 'Enable contrib build') option('multi_thread', type: 'feature', value: 'enabled', - description: 'Enable multi-threading when pthread is detected') + description: 'Enable multi-threading when pthread or Windows is detected') option('zlib', type: 'feature', value: 'auto', description: 'Enable zlib support') option('lzma', type: 'feature', value: 'auto', diff --git a/build/meson/programs/meson.build b/build/meson/programs/meson.build index 8df3a5dcbdd..e103a629539 100644 --- a/build/meson/programs/meson.build +++ b/build/meson/programs/meson.build @@ -18,6 +18,7 @@ zstd_programs_sources = [join_paths(zstd_rootdir, 'programs/zstdcli.c'), join_paths(zstd_rootdir, 'programs/benchfn.c'), join_paths(zstd_rootdir, 'programs/benchzstd.c'), join_paths(zstd_rootdir, 'programs/datagen.c'), + join_paths(zstd_rootdir, 'programs/lorem.c'), join_paths(zstd_rootdir, 'programs/dibio.c'), join_paths(zstd_rootdir, 'programs/zstdcli_trace.c')] @@ -51,7 +52,8 @@ endif export_dynamic_on_windows = false # explicit backtrace enable/disable for Linux & Darwin -if not use_backtrace +have_execinfo = cc.has_header('execinfo.h', required: use_backtrace) +if not have_execinfo zstd_c_args += '-DBACKTRACE_ENABLE=0' elif use_debug and host_machine_os == os_windows # MinGW target zstd_c_args += '-DBACKTRACE_ENABLE=1' @@ -71,7 +73,14 @@ zstd = executable('zstd', c_args: zstd_c_args, dependencies: zstd_deps, export_dynamic: export_dynamic_on_windows, # Since Meson 0.45.0 - install: true) + build_by_default: bin_programs, + install: bin_programs) + +if not bin_programs + # we generate rules to build the programs, but don't install anything + # so do not continue to installing scripts and manpages + subdir_done() +endif zstd_frugal_sources = [join_paths(zstd_rootdir, 'programs/zstdcli.c'), join_paths(zstd_rootdir, 'programs/timefn.c'), diff --git a/build/meson/tests/meson.build b/build/meson/tests/meson.build index 22f43209ad7..71ffc50693d 100644 --- a/build/meson/tests/meson.build +++ b/build/meson/tests/meson.build @@ -21,7 +21,6 @@ FUZZER_FLAGS = ['--no-big-tests'] FUZZERTEST = '-T200s' ZSTREAM_TESTTIME = '-T90s' DECODECORPUS_TESTTIME = '-T30' -ZSTDRTTEST = ['--test-large-data'] # ============================================================================= # Executables @@ -30,6 +29,7 @@ ZSTDRTTEST = ['--test-large-data'] test_includes = [ include_directories(join_paths(zstd_rootdir, 'programs')) ] testcommon_sources = [join_paths(zstd_rootdir, 'programs/datagen.c'), + join_paths(zstd_rootdir, 'programs/lorem.c'), join_paths(zstd_rootdir, 'programs/util.c'), 
join_paths(zstd_rootdir, 'programs/timefn.c'), join_paths(zstd_rootdir, 'programs/benchfn.c'), @@ -44,7 +44,8 @@ testcommon_dep = declare_dependency(link_with: testcommon, dependencies: libzstd_deps, include_directories: libzstd_includes) -datagen_sources = [join_paths(zstd_rootdir, 'tests/datagencli.c')] +datagen_sources = [join_paths(zstd_rootdir, 'tests/datagencli.c'), + join_paths(zstd_rootdir, 'tests/loremOut.c')] datagen = executable('datagen', datagen_sources, c_args: [ '-DNDEBUG' ], @@ -66,8 +67,10 @@ fuzzer = executable('fuzzer', dependencies: [ testcommon_dep, thread_dep ], install: false) -zstreamtest_sources = [join_paths(zstd_rootdir, 'tests/seqgen.c'), - join_paths(zstd_rootdir, 'tests/zstreamtest.c')] +zstreamtest_sources = [ + join_paths(zstd_rootdir, 'tests/seqgen.c'), + join_paths(zstd_rootdir, 'tests/zstreamtest.c'), + join_paths(zstd_rootdir, 'tests/external_matchfinder.c')] zstreamtest = executable('zstreamtest', zstreamtest_sources, include_directories: test_includes, @@ -90,7 +93,7 @@ roundTripCrash = executable('roundTripCrash', longmatch_sources = [join_paths(zstd_rootdir, 'tests/longmatch.c')] longmatch = executable('longmatch', longmatch_sources, - dependencies: [ libzstd_dep ], + dependencies: [ libzstd_internal_dep ], install: false) invalidDictionaries_sources = [join_paths(zstd_rootdir, 'tests/invalidDictionaries.c')] @@ -134,24 +137,38 @@ checkTag = executable('checkTag', # ============================================================================= if tests_supported_oses.contains(host_machine_os) - valgrind_prog = find_program('valgrind', ['/usr/bin/valgrind'], required: true) + valgrind_prog = find_program('valgrind', ['/usr/bin/valgrind'], required: false) valgrindTest_py = files('valgrindTest.py') - test('valgrindTest', - valgrindTest_py, - args: [valgrind_prog.path(), zstd, datagen, fuzzer, fullbench], - depends: [zstd, datagen, fuzzer, fullbench], - timeout: 600) # Timeout should work on HDD drive + if valgrind_prog.found() + test('valgrindTest', + valgrindTest_py, + args: [valgrind_prog.path(), zstd, datagen, fuzzer, fullbench], + depends: [zstd, datagen, fuzzer, fullbench], + timeout: 600) # Timeout should work on HDD drive + endif endif if host_machine_os != os_windows playTests_sh = find_program(join_paths(zstd_rootdir, 'tests/playTests.sh'), required: true) - test('test-zstd', - playTests_sh, - args: ZSTDRTTEST, - env: ['ZSTD_BIN=' + zstd.full_path(), 'DATAGEN_BIN=./datagen'], - depends: [datagen], - workdir: meson.current_build_dir(), - timeout: 2800) # Timeout should work on HDD drive + + # add slow tests only if the meson version is new enough to support + # test setups with default-excluded suites + if meson.version().version_compare('>=0.57.0') + matrix = {'fast': [], 'slow': ['--test-large-data']} + else + matrix = {'fast': []} + endif + + foreach suite, opt: matrix + test('test-zstd-'+suite, + playTests_sh, + args: opt, + env: ['ZSTD_BIN=' + zstd.full_path(), 'DATAGEN_BIN=./datagen'], + depends: [datagen, zstd], + suite: suite, + workdir: meson.current_build_dir(), + timeout: 2800) # Timeout should work on HDD drive + endforeach endif test('test-fullbench-1', @@ -190,3 +207,11 @@ test('test-decodecorpus', args: ['-t', DECODECORPUS_TESTTIME], timeout: 60) test('test-poolTests', poolTests) # should be fast + +if meson.version().version_compare('>=0.57.0') + add_test_setup('fast', + is_default: true, + exclude_suites: ['slow']) + add_test_setup('slow', + exclude_suites: ['fast']) +endif diff --git a/build/single_file_libs/.gitignore 
b/build/single_file_libs/.gitignore index 8c1bc71edd6..b769e108924 100644 --- a/build/single_file_libs/.gitignore +++ b/build/single_file_libs/.gitignore @@ -4,6 +4,7 @@ zstddeclib.c zstdenclib.c zstd.c zstd.h +zstd_errors.h # test artifacts temp* diff --git a/build/single_file_libs/build_library_test.sh b/build/single_file_libs/build_library_test.sh index f4ba109ab38..b67cd99a0f2 100755 --- a/build/single_file_libs/build_library_test.sh +++ b/build/single_file_libs/build_library_test.sh @@ -70,6 +70,7 @@ echo "Single file library creation script: PASSED" # Copy the header to here (for the tests) cp "$ZSTD_SRC_ROOT/zstd.h" examples/zstd.h +cp "$ZSTD_SRC_ROOT/zstd_errors.h" examples/zstd_errors.h # Compile the generated output cc -Wall -Wextra -Werror -Wshadow -pthread -I. -Os -g0 -o $OUT_FILE zstd.c examples/roundtrip.c diff --git a/build/single_file_libs/zstd-in.c b/build/single_file_libs/zstd-in.c index 59c6b5f19b6..f381ecc4f56 100644 --- a/build/single_file_libs/zstd-in.c +++ b/build/single_file_libs/zstd-in.c @@ -8,7 +8,7 @@ * \endcode */ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -69,6 +69,7 @@ #include "compress/zstd_compress_literals.c" #include "compress/zstd_compress_sequences.c" #include "compress/zstd_compress_superblock.c" +#include "compress/zstd_preSplit.c" #include "compress/zstd_compress.c" #include "compress/zstd_double_fast.c" #include "compress/zstd_fast.c" diff --git a/build/single_file_libs/zstddeclib-in.c b/build/single_file_libs/zstddeclib-in.c index 5a58589c970..8d9c1f54bb7 100644 --- a/build/single_file_libs/zstddeclib-in.c +++ b/build/single_file_libs/zstddeclib-in.c @@ -8,7 +8,7 @@ * \endcode */ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/diagnose_corruption/Makefile b/contrib/diagnose_corruption/Makefile index a21a0021216..ecc9e63952c 100644 --- a/contrib/diagnose_corruption/Makefile +++ b/contrib/diagnose_corruption/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2019-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/diagnose_corruption/check_flipped_bits.c b/contrib/diagnose_corruption/check_flipped_bits.c index cc40ab84beb..09ddd467476 100644 --- a/contrib/diagnose_corruption/check_flipped_bits.c +++ b/contrib/diagnose_corruption/check_flipped_bits.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile index e06a32c0dac..912bf194977 100644 --- a/contrib/docker/Dockerfile +++ b/contrib/docker/Dockerfile @@ -1,13 +1,13 @@ # Dockerfile # First image to build the binary -FROM alpine as builder +FROM alpine@sha256:69665d02cb32192e52e07644d76bc6f25abeb5410edc1c7a81a10ba3f0efb90a as builder RUN apk --no-cache add make gcc libc-dev COPY . 
/src RUN mkdir /pkg && cd /src && make && make DESTDIR=/pkg install # Second minimal image to only keep the built binary -FROM alpine +FROM alpine@sha256:69665d02cb32192e52e07644d76bc6f25abeb5410edc1c7a81a10ba3f0efb90a # Copy the built files COPY --from=builder /pkg / diff --git a/contrib/externalSequenceProducer/.gitignore b/contrib/externalSequenceProducer/.gitignore new file mode 100644 index 00000000000..147710aee78 --- /dev/null +++ b/contrib/externalSequenceProducer/.gitignore @@ -0,0 +1,2 @@ +# build artifacts +externalSequenceProducer diff --git a/contrib/externalSequenceProducer/Makefile b/contrib/externalSequenceProducer/Makefile new file mode 100644 index 00000000000..0591ae01ba5 --- /dev/null +++ b/contrib/externalSequenceProducer/Makefile @@ -0,0 +1,40 @@ +# ################################################################ +# Copyright (c) Yann Collet, Meta Platforms, Inc. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# ################################################################ + +PROGDIR = ../../programs +LIBDIR = ../../lib + +LIBZSTD = $(LIBDIR)/libzstd.a + +CPPFLAGS+= -I$(LIBDIR) -I$(LIBDIR)/compress -I$(LIBDIR)/common + +CFLAGS ?= -O3 +CFLAGS += -std=gnu99 +DEBUGFLAGS= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum \ + -Wstrict-prototypes -Wundef -Wpointer-arith \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls +CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) + +default: externalSequenceProducer + +all: externalSequenceProducer + +externalSequenceProducer: sequence_producer.c main.c $(LIBZSTD) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + +.PHONY: $(LIBZSTD) +$(LIBZSTD): + $(MAKE) -C $(LIBDIR) libzstd.a CFLAGS="$(CFLAGS)" + +clean: + $(RM) *.o + $(MAKE) -C $(LIBDIR) clean > /dev/null + $(RM) externalSequenceProducer diff --git a/contrib/externalSequenceProducer/README.md b/contrib/externalSequenceProducer/README.md new file mode 100644 index 00000000000..c16a170073b --- /dev/null +++ b/contrib/externalSequenceProducer/README.md @@ -0,0 +1,14 @@ +externalSequenceProducer +===================== + +`externalSequenceProducer` is a test tool for the Block-Level Sequence Producer API. +It demonstrates how to use the API to perform a simple round-trip test. + +A sample sequence producer is provided in sequence_producer.c, but the user can swap +this out with a different one if desired. The sample sequence producer implements +LZ parsing with a 1KB hashtable. Dictionary-based parsing is not currently supported. + +Command line : +``` +externalSequenceProducer filename +``` diff --git a/contrib/externalSequenceProducer/main.c b/contrib/externalSequenceProducer/main.c new file mode 100644 index 00000000000..c81e9bf4097 --- /dev/null +++ b/contrib/externalSequenceProducer/main.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#include "zstd_errors.h" +#include "sequence_producer.h" // simpleSequenceProducer + +#define CHECK(res) \ +do { \ + if (ZSTD_isError(res)) { \ + printf("ERROR: %s\n", ZSTD_getErrorName(res)); \ + return 1; \ + } \ +} while (0) \ + +int main(int argc, char *argv[]) { + int retn = 0; + if (argc != 2) { + printf("Usage: externalSequenceProducer <file>\n"); + return 1; + } + + ZSTD_CCtx* const zc = ZSTD_createCCtx(); + + int simpleSequenceProducerState = 0xdeadbeef; + + // Here is the crucial bit of code! + ZSTD_registerSequenceProducer( + zc, + &simpleSequenceProducerState, + simpleSequenceProducer + ); + + { + size_t const res = ZSTD_CCtx_setParameter(zc, ZSTD_c_enableSeqProducerFallback, 1); + CHECK(res); + } + + FILE *f = fopen(argv[1], "rb"); + assert(f); + { + int const ret = fseek(f, 0, SEEK_END); + assert(ret == 0); + } + size_t const srcSize = ftell(f); + { + int const ret = fseek(f, 0, SEEK_SET); + assert(ret == 0); + } + + char* const src = malloc(srcSize + 1); + assert(src); + { + size_t const ret = fread(src, srcSize, 1, f); + assert(ret == 1); + int const ret2 = fclose(f); + assert(ret2 == 0); + } + + size_t const dstSize = ZSTD_compressBound(srcSize); + char* const dst = malloc(dstSize); + assert(dst); + + size_t const cSize = ZSTD_compress2(zc, dst, dstSize, src, srcSize); + CHECK(cSize); + + char* const val = malloc(srcSize); + assert(val); + + { + size_t const res = ZSTD_decompress(val, srcSize, dst, cSize); + CHECK(res); + } + + if (memcmp(src, val, srcSize) == 0) { + printf("Compression and decompression were successful!\n"); + printf("Original size: %zu\n", srcSize); + printf("Compressed size: %zu\n", cSize); + } else { + printf("ERROR: input and validation buffers don't match!\n"); + for (size_t i = 0; i < srcSize; i++) { + if (src[i] != val[i]) { + printf("First bad index: %zu\n", i); + break; + } + } + retn = 1; + } + + ZSTD_freeCCtx(zc); + free(src); + free(dst); + free(val); + return retn; +} diff --git a/contrib/externalSequenceProducer/sequence_producer.c b/contrib/externalSequenceProducer/sequence_producer.c new file mode 100644 index 00000000000..60a2f9572b0 --- /dev/null +++ b/contrib/externalSequenceProducer/sequence_producer.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include "zstd_compress_internal.h" +#include "sequence_producer.h" + +#define HSIZE 1024 +static U32 const HLOG = 10; +static U32 const MLS = 4; +static U32 const BADIDX = 0xffffffff; + +size_t simpleSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +) { + const BYTE* const istart = (const BYTE*)src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip = istart; + const BYTE* anchor = istart; + size_t seqCount = 0; + U32 hashTable[HSIZE]; + + (void)sequenceProducerState; + (void)dict; + (void)dictSize; + (void)outSeqsCapacity; + (void)compressionLevel; + + { int i; + for (i=0; i < HSIZE; i++) { + hashTable[i] = BADIDX; + } } + + while (ip + MLS < iend) { + size_t const hash = ZSTD_hashPtr(ip, HLOG, MLS); + U32 const matchIndex = hashTable[hash]; + hashTable[hash] = (U32)(ip - istart); + + if (matchIndex != BADIDX) { + const BYTE* const match = istart + matchIndex; + U32 const matchLen = (U32)ZSTD_count(ip, match, iend); + if (matchLen >= ZSTD_MINMATCH_MIN) { + U32 const litLen = (U32)(ip - anchor); + U32 const offset = (U32)(ip - match); + ZSTD_Sequence const seq = { + offset, litLen, matchLen, 0 + }; + + /* Note: it's crucial to stay within the window size! */ + if (offset <= windowSize) { + outSeqs[seqCount++] = seq; + ip += matchLen; + anchor = ip; + continue; + } + } + } + + ip++; + } + + { ZSTD_Sequence const finalSeq = { + 0, (U32)(iend - anchor), 0, 0 + }; + outSeqs[seqCount++] = finalSeq; + } + + return seqCount; +} diff --git a/contrib/externalSequenceProducer/sequence_producer.h b/contrib/externalSequenceProducer/sequence_producer.h new file mode 100644 index 00000000000..19f9982ac36 --- /dev/null +++ b/contrib/externalSequenceProducer/sequence_producer.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) Yann Collet, Meta Platforms, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef MATCHFINDER_H +#define MATCHFINDER_H + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" + +size_t simpleSequenceProducer( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +); + +#endif diff --git a/contrib/freestanding_lib/freestanding.py b/contrib/freestanding_lib/freestanding.py index 4a02dea147b..df6983245d6 100755 --- a/contrib/freestanding_lib/freestanding.py +++ b/contrib/freestanding_lib/freestanding.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # ################################################################ -# Copyright (c) 2021-2021, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -431,7 +431,7 @@ def __init__( external_xxhash: bool, xxh64_state: Optional[str], xxh64_prefix: Optional[str], rewritten_includes: [(str, str)], defs: [(str, Optional[str])], replaces: [(str, str)], - undefs: [str], excludes: [str], seds: [str], + undefs: [str], excludes: [str], seds: [str], spdx: bool, ): self._zstd_deps = zstd_deps self._mem = mem @@ -446,6 +446,7 @@ def __init__( self._undefs = undefs self._excludes = excludes self._seds = seds + self._spdx = spdx def _dst_lib_file_paths(self): """ @@ -640,6 +641,27 @@ def _process_seds(self): for sed in self._seds: self._process_sed(sed) + def _process_spdx(self): + if not self._spdx: + return + self._log("Processing spdx") + SPDX_C = "// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause\n" + SPDX_H_S = "/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */\n" + for filepath in self._dst_lib_file_paths(): + file = FileLines(filepath) + if file.lines[0] == SPDX_C or file.lines[0] == SPDX_H_S: + continue + for line in file.lines: + if "SPDX-License-Identifier" in line: + raise RuntimeError(f"Unexpected SPDX license identifier: {file.filename} {repr(line)}") + if file.filename.endswith(".c"): + file.lines.insert(0, SPDX_C) + elif file.filename.endswith(".h") or file.filename.endswith(".S"): + file.lines.insert(0, SPDX_H_S) + else: + raise RuntimeError(f"Unexpected file extension: {file.filename}") + file.write() + def go(self): @@ -651,6 +673,7 @@ def go(self): self._rewrite_includes() self._replace_xxh64_prefix() self._process_seds() + self._process_spdx() def parse_optional_pair(defines: [str]) -> [(str, Optional[str])]: @@ -689,6 +712,7 @@ def main(name, args): parser.add_argument("--xxh64-prefix", default=None, help="Alternate XXH64 function prefix (excluding _) e.g. --xxh64-prefix=xxh64") parser.add_argument("--rewrite-include", default=[], dest="rewritten_includes", action="append", help="Rewrite an include REGEX=NEW (e.g. '=')") parser.add_argument("--sed", default=[], dest="seds", action="append", help="Apply a sed replacement. Format: `s/REGEX/FORMAT/[g]`. REGEX is a Python regex. FORMAT is a Python format string formatted by the regex dict.") + parser.add_argument("--spdx", action="store_true", help="Add SPDX License Identifiers") parser.add_argument("-D", "--define", default=[], dest="defs", action="append", help="Pre-define this macro (can be passed multiple times)") parser.add_argument("-U", "--undefine", default=[], dest="undefs", action="append", help="Pre-undefine this macro (can be passed multiple times)") parser.add_argument("-R", "--replace", default=[], dest="replaces", action="append", help="Pre-define this macro and replace the first ifndef block with its definition") @@ -743,6 +767,7 @@ def main(name, args): args.undefs, args.excludes, args.seds, + args.spdx, ).go() if __name__ == "__main__": diff --git a/contrib/gen_html/Makefile b/contrib/gen_html/Makefile index 425f266c4e4..f93669e023f 100644 --- a/contrib/gen_html/Makefile +++ b/contrib/gen_html/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -40,7 +40,7 @@ gen_html: gen_html.cpp $(ZSTDMANUAL): gen_html $(ZSTDAPI) echo "Update zstd manual in /doc" - ./gen_html $(LIBVER) $(ZSTDAPI) $(ZSTDMANUAL) + ./gen_html$(EXT) $(LIBVER) $(ZSTDAPI) $(ZSTDMANUAL) .PHONY: manual manual: gen_html $(ZSTDMANUAL) diff --git a/contrib/gen_html/gen_html.cpp b/contrib/gen_html/gen_html.cpp index 90d5b21a3aa..adf0f41b905 100644 --- a/contrib/gen_html/gen_html.cpp +++ b/contrib/gen_html/gen_html.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Przemyslaw Skibinski, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -211,6 +211,7 @@ int main(int argc, char *argv[]) { ostream << "\n\n\n" << version << "\n\n" << endl; ostream << "
" << version << " [HTML markup inside these gen_html.cpp string literals was stripped during extraction and is not recoverable; a fragment mentioning "Contents" survives below]
+    ostream << "Note: the content of this file has been automatically generated by parsing \"zstd.h\" \n";
[the remainder of the gen_html.cpp hunk and the header of the next file's diff were also stripped; the hunks that follow belong to contrib/largeNbDicts/largeNbDicts.c]
diff --git a/contrib/largeNbDicts/largeNbDicts.c b/contrib/largeNbDicts/largeNbDicts.c
@@ ... @@ #define DISPLAYLEVEL(l, ...) { if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } } -static int g_displayLevel = DISPLAY_LEVEL_DEFAULT; /* 0 : no display, 1: errors, 2 : + result + interaction + warnings, 3 : + progression, 4 : + information */ +static int g_displayLevel = ZSTD_DISPLAY_LEVEL_DEFAULT; /* 0 : no display, 1: errors, 2 : + result + interaction + warnings, 3 : + progression, 4 : + information */ /*--- buffer_t ---*/ @@ -739,6 +741,8 @@ static int benchMem(slice_collection_t dstBlocks, slice_collection_t srcBlocks, /* BMK_benchTimedFn may not run exactly nbRounds iterations */ double speedAggregated = aggregateData(speedPerRound, roundNb + 1, metricAggregatePref); + free(speedPerRound); + if (metricAggregatePref == fastest) DISPLAY("Fastest Speed : %.1f MB/s \n", speedAggregated); else @@ -856,7 +860,7 @@ int bench(const char **fileNameTable, unsigned nbFiles, const char *dictionary, CONTROL(cTotalSizeNoDict != 0); DISPLAYLEVEL(3, "compressing at level %u without dictionary : Ratio=%.2f (%u bytes) \n", clevel, - (double)totalSrcSlicesSize / cTotalSizeNoDict, (unsigned)cTotalSizeNoDict); + (double)totalSrcSlicesSize / (double)cTotalSizeNoDict, (unsigned)cTotalSizeNoDict); size_t* const cSizes = malloc(nbBlocks * sizeof(size_t)); CONTROL(cSizes != NULL); @@ -865,7 +869,7 @@ int bench(const char **fileNameTable, unsigned nbFiles, const char *dictionary, CONTROL(cTotalSize != 0); DISPLAYLEVEL(3, "compressed using a %u bytes dictionary : Ratio=%.2f (%u bytes) \n", (unsigned)dictBuffer.size, - (double)totalSrcSlicesSize / cTotalSize, (unsigned)cTotalSize); + (double)totalSrcSlicesSize / (double)cTotalSize, (unsigned)cTotalSize); /* now dstSlices contain the real compressed size of each block, instead of the maximum capacity */ shrinkSizes(dstSlices, cSizes); @@ -985,7 +989,7 @@ int usage(const char* exeName) DISPLAY ("-# : use compression level # (default: %u) \n", CLEVEL_DEFAULT); DISPLAY ("-D # : use # as a dictionary (default: create one) \n"); DISPLAY ("-i# : nb benchmark rounds (default: %u) \n", BENCH_TIME_DEFAULT_S); - DISPLAY ("-p# : print speed for all rounds 0=fastest 1=median (default: 0)"); + DISPLAY ("-p# : print speed for all rounds 0=fastest 1=median (default: 0) \n"); DISPLAY ("--nbBlocks=#: use # blocks for bench (default: one per file) \n"); DISPLAY ("--nbDicts=# : create # dictionaries for bench (default: one per block) \n"); DISPLAY ("-h : help (this text) \n"); @@ -1025,7 +1029,7 @@ int main (int argc, const char** argv) unsigned nbBlocks = 0; /* determine nbBlocks automatically, from source and blockSize */ ZSTD_dictContentType_e dictContentType = ZSTD_dct_auto; ZSTD_dictAttachPref_e dictAttachPref = ZSTD_dictDefaultAttach; - ZSTD_paramSwitch_e prefetchCDictTables = ZSTD_ps_auto; + ZSTD_ParamSwitch_e prefetchCDictTables = ZSTD_ps_auto; metricAggregatePref_e metricAggregatePref = fastest; for (int argNb = 1; argNb < argc ; argNb++) { diff --git a/contrib/linux-kernel/Makefile b/contrib/linux-kernel/Makefile index 47a4317404b..63dd15d958f 100644 --- a/contrib/linux-kernel/Makefile +++ b/contrib/linux-kernel/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -26,14 +26,13 @@ libzstd: --rewrite-include '"(\.\./)?zstd_errors.h"=' \ --sed 's,/\*\*\*,/* *,g' \ --sed 's,/\*\*,/*,g' \ + --spdx \ -DZSTD_NO_INTRINSICS \ -DZSTD_NO_UNUSED_FUNCTIONS \ -DZSTD_LEGACY_SUPPORT=0 \ -DZSTD_STATIC_LINKING_ONLY \ -DFSE_STATIC_LINKING_ONLY \ - -DHUF_STATIC_LINKING_ONLY \ -DXXH_STATIC_LINKING_ONLY \ - -DMEM_FORCE_MEMORY_ACCESS=0 \ -D__GNUC__ \ -D__linux__=1 \ -DSTATIC_BMI2=0 \ @@ -49,16 +48,19 @@ libzstd: -UZSTD_MULTITHREAD \ -U_MSC_VER \ -U_WIN32 \ - -RZSTDLIB_VISIBILITY= \ - -RZSTDERRORLIB_VISIBILITY= \ + -RZSTDLIB_VISIBLE= \ + -RZSTDERRORLIB_VISIBLE= \ -RZSTD_FALLTHROUGH=fallthrough \ -DZSTD_HAVE_WEAK_SYMBOLS=0 \ -DZSTD_TRACE=0 \ -DZSTD_NO_TRACE \ + -DZSTD_DISABLE_ASM \ -DZSTD_LINUX_KERNEL + rm linux/lib/zstd/decompress/huf_decompress_amd64.S mv linux/lib/zstd/zstd.h linux/include/linux/zstd_lib.h mv linux/lib/zstd/zstd_errors.h linux/include/linux/ cp linux_zstd.h linux/include/linux/zstd.h + cp zstd_common_module.c linux/lib/zstd cp zstd_compress_module.c linux/lib/zstd cp zstd_decompress_module.c linux/lib/zstd cp decompress_sources.h linux/lib/zstd @@ -102,4 +104,5 @@ test: libzstd .PHONY: clean clean: - $(RM) -rf linux test/test test/static_test + $(RM) -rf linux + $(MAKE) -C test clean diff --git a/contrib/linux-kernel/decompress_sources.h b/contrib/linux-kernel/decompress_sources.h index a06ca187aab..8a47eb2a451 100644 --- a/contrib/linux-kernel/decompress_sources.h +++ b/contrib/linux-kernel/decompress_sources.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/linux.mk b/contrib/linux-kernel/linux.mk index f6f3a8983d8..be218b5e0ed 100644 --- a/contrib/linux-kernel/linux.mk +++ b/contrib/linux-kernel/linux.mk @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -10,16 +10,10 @@ # ################################################################ obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o - -ccflags-y += -Wno-error=deprecated-declarations +obj-$(CONFIG_ZSTD_COMMON) += zstd_common.o zstd_compress-y := \ zstd_compress_module.o \ - common/debug.o \ - common/entropy_common.o \ - common/error_private.o \ - common/fse_decompress.o \ - common/zstd_common.o \ compress/fse_compress.o \ compress/hist.o \ compress/huf_compress.o \ @@ -32,16 +26,19 @@ zstd_compress-y := \ compress/zstd_lazy.o \ compress/zstd_ldm.o \ compress/zstd_opt.o \ + compress/zstd_preSplit.o \ zstd_decompress-y := \ zstd_decompress_module.o \ + decompress/huf_decompress.o \ + decompress/zstd_ddict.o \ + decompress/zstd_decompress.o \ + decompress/zstd_decompress_block.o \ + +zstd_common-y := \ + zstd_common_module.o \ common/debug.o \ common/entropy_common.o \ common/error_private.o \ common/fse_decompress.o \ common/zstd_common.o \ - decompress/huf_decompress.o \ - decompress/huf_decompress_amd64.o \ - decompress/zstd_ddict.o \ - decompress/zstd_decompress.o \ - decompress/zstd_decompress_block.o \ diff --git a/contrib/linux-kernel/linux_zstd.h b/contrib/linux-kernel/linux_zstd.h index 113408eef6e..2f2a3c8b8a3 100644 --- a/contrib/linux-kernel/linux_zstd.h +++ b/contrib/linux-kernel/linux_zstd.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -77,6 +77,30 @@ int zstd_min_clevel(void); */ int zstd_max_clevel(void); +/** + * zstd_default_clevel() - default compression level + * + * Return: Default compression level. + */ +int zstd_default_clevel(void); + +/** + * struct zstd_custom_mem - custom memory allocation + */ +typedef ZSTD_customMem zstd_custom_mem; + +/** + * struct zstd_dict_load_method - Dictionary load method. + * See zstd_lib.h. + */ +typedef ZSTD_dictLoadMethod_e zstd_dict_load_method; + +/** + * struct zstd_dict_content_type - Dictionary context type. + * See zstd_lib.h. + */ +typedef ZSTD_dictContentType_e zstd_dict_content_type; + /* ====== Parameter Selection ====== */ /** @@ -136,9 +160,32 @@ typedef ZSTD_parameters zstd_parameters; zstd_parameters zstd_get_params(int level, unsigned long long estimated_src_size); -/* ====== Single-pass Compression ====== */ +/** + * zstd_get_cparams() - returns zstd_compression_parameters for selected level + * @level: The compression level + * @estimated_src_size: The estimated source size to compress or 0 + * if unknown. + * @dict_size: Dictionary size. + * + * Return: The selected zstd_compression_parameters. + */ +zstd_compression_parameters zstd_get_cparams(int level, + unsigned long long estimated_src_size, size_t dict_size); typedef ZSTD_CCtx zstd_cctx; +typedef ZSTD_cParameter zstd_cparameter; + +/** + * zstd_cctx_set_param() - sets a compression parameter + * @cctx: The context. Must have been initialized with zstd_init_cctx(). + * @param: The parameter to set. + * @value: The value to set the parameter to. + * + * Return: Zero or an error, which can be checked using zstd_is_error(). 
+ */ +size_t zstd_cctx_set_param(zstd_cctx *cctx, zstd_cparameter param, int value); + +/* ====== Single-pass Compression ====== */ /** * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx @@ -153,6 +200,20 @@ typedef ZSTD_CCtx zstd_cctx; */ size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters); +/** + * zstd_cctx_workspace_bound_with_ext_seq_prod() - max memory needed to + * initialize a zstd_cctx when using the block-level external sequence + * producer API. + * @parameters: The compression parameters to be used. + * + * If multiple compression parameters might be used, the caller must call + * this function for each set of parameters and use the maximum size. + * + * Return: A lower bound on the size of the workspace that is passed to + * zstd_init_cctx(). + */ +size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *parameters); + /** * zstd_init_cctx() - initialize a zstd compression context * @workspace: The workspace to emplace the context into. It must outlive @@ -180,6 +241,71 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size); size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity, const void *src, size_t src_size, const zstd_parameters *parameters); +/** + * zstd_create_cctx_advanced() - Create compression context + * @custom_mem: Custom allocator. + * + * Return: NULL on error, pointer to compression context otherwise. + */ +zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem); + +/** + * zstd_free_cctx() - Free compression context + * @cdict: Pointer to compression context. + * + * Return: Always 0. + */ +size_t zstd_free_cctx(zstd_cctx* cctx); + +/** + * struct zstd_cdict - Compression dictionary. + * See zstd_lib.h. + */ +typedef ZSTD_CDict zstd_cdict; + +/** + * zstd_create_cdict_byreference() - Create compression dictionary + * @dict: Pointer to dictionary buffer. + * @dict_size: Size of the dictionary buffer. + * @dict_load_method: Dictionary load method. + * @dict_content_type: Dictionary content type. + * @custom_mem: Memory allocator. + * + * Note, this uses @dict by reference (ZSTD_dlm_byRef), so it should be + * free before zstd_cdict is destroyed. + * + * Return: NULL on error, pointer to compression dictionary + * otherwise. + */ +zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size, + zstd_compression_parameters cparams, + zstd_custom_mem custom_mem); + +/** + * zstd_free_cdict() - Free compression dictionary + * @cdict: Pointer to compression dictionary. + * + * Return: Always 0. + */ +size_t zstd_free_cdict(zstd_cdict* cdict); + +/** + * zstd_compress_using_cdict() - compress src into dst using a dictionary + * @cctx: The context. Must have been initialized with zstd_init_cctx(). + * @dst: The buffer to compress src into. + * @dst_capacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @src_size: The size of the data to compress. + * @cdict: The dictionary to be used. + * + * Return: The compressed size or an error, which can be checked using + * zstd_is_error(). 
+ */ +size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst, + size_t dst_capacity, const void *src, size_t src_size, + const zstd_cdict *cdict); + /* ====== Single-pass Decompression ====== */ typedef ZSTD_DCtx zstd_dctx; @@ -220,6 +346,71 @@ zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size); size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity, const void *src, size_t src_size); +/** + * struct zstd_ddict - Decompression dictionary. + * See zstd_lib.h. + */ +typedef ZSTD_DDict zstd_ddict; + +/** + * zstd_create_ddict_byreference() - Create decompression dictionary + * @dict: Pointer to dictionary buffer. + * @dict_size: Size of the dictionary buffer. + * @dict_load_method: Dictionary load method. + * @dict_content_type: Dictionary content type. + * @custom_mem: Memory allocator. + * + * Note, this uses @dict by reference (ZSTD_dlm_byRef), so it should be + * free before zstd_ddict is destroyed. + * + * Return: NULL on error, pointer to decompression dictionary + * otherwise. + */ +zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size, + zstd_custom_mem custom_mem); +/** + * zstd_free_ddict() - Free decompression dictionary + * @dict: Pointer to the dictionary. + * + * Return: Always 0. + */ +size_t zstd_free_ddict(zstd_ddict *ddict); + +/** + * zstd_create_dctx_advanced() - Create decompression context + * @custom_mem: Custom allocator. + * + * Return: NULL on error, pointer to decompression context otherwise. + */ +zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem); + +/** + * zstd_free_dctx() -- Free decompression context + * @dctx: Pointer to decompression context. + * Return: Always 0. + */ +size_t zstd_free_dctx(zstd_dctx *dctx); + +/** + * zstd_decompress_using_ddict() - decompress src into dst using a dictionary + * @dctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dst_capacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @src_size: The exact size of the data to decompress. + * @ddict: The dictionary to be used. + * + * Return: The decompressed size or an error, which can be checked using + * zstd_is_error(). + */ +size_t zstd_decompress_using_ddict(zstd_dctx *dctx, + void *dst, size_t dst_capacity, const void *src, size_t src_size, + const zstd_ddict *ddict); + + /* ====== Streaming Buffers ====== */ /** @@ -257,6 +448,16 @@ typedef ZSTD_CStream zstd_cstream; */ size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams); +/** + * zstd_cstream_workspace_bound_with_ext_seq_prod() - memory needed to initialize + * a zstd_cstream when using the block-level external sequence producer API. + * @cparams: The compression parameters to be used for compression. + * + * Return: A lower bound on the size of the workspace that is passed to + * zstd_init_cstream(). + */ +size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *cparams); + /** * zstd_init_cstream() - initialize a zstd streaming compression context * @parameters The zstd parameters to use for compression. 
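The byreference dictionary helpers documented above are designed to be paired with the advanced (custom-allocator) context constructors. A minimal sketch of one possible compression-side pairing, assuming a caller-supplied `zstd_custom_mem`; `compress_with_cdict` is an illustrative name, not part of the patch, and error unwinding is reduced to the bare minimum (the zstd free functions tolerate NULL):

```c
#include <linux/zstd.h>

/* Illustration only: compress src using a dictionary held by reference.
 * Returns 0 on allocation failure; callers should check the result with
 * zstd_is_error(), as documented above. */
static size_t compress_with_cdict(const void *dict, size_t dict_size,
				  void *dst, size_t dst_capacity,
				  const void *src, size_t src_size,
				  zstd_custom_mem mem)
{
	zstd_compression_parameters cparams =
		zstd_get_cparams(zstd_default_clevel(), src_size, dict_size);
	zstd_cdict *cdict =
		zstd_create_cdict_byreference(dict, dict_size, cparams, mem);
	zstd_cctx *cctx = zstd_create_cctx_advanced(mem);
	size_t csize = 0;

	if (cdict && cctx)
		/* dict is only referenced (ZSTD_dlm_byRef), so the buffer
		 * must stay valid until zstd_free_cdict() has run. */
		csize = zstd_compress_using_cdict(cctx, dst, dst_capacity,
						  src, src_size, cdict);

	zstd_free_cctx(cctx);
	zstd_free_cdict(cdict);
	return csize;
}
```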
@@ -416,6 +617,18 @@ size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output, */ size_t zstd_find_frame_compressed_size(const void *src, size_t src_size); +/** + * zstd_register_sequence_producer() - exposes the zstd library function + * ZSTD_registerSequenceProducer(). This is used for the block-level external + * sequence producer API. See upstream zstd.h for detailed documentation. + */ +typedef ZSTD_sequenceProducer_F zstd_sequence_producer_f; +void zstd_register_sequence_producer( + zstd_cctx *cctx, + void* sequence_producer_state, + zstd_sequence_producer_f sequence_producer +); + /** * struct zstd_frame_params - zstd frame parameters stored in the frame header * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not @@ -429,7 +642,7 @@ size_t zstd_find_frame_compressed_size(const void *src, size_t src_size); * * See zstd_lib.h. */ -typedef ZSTD_frameHeader zstd_frame_header; +typedef ZSTD_FrameHeader zstd_frame_header; /** * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame @@ -444,4 +657,35 @@ typedef ZSTD_frameHeader zstd_frame_header; size_t zstd_get_frame_header(zstd_frame_header *params, const void *src, size_t src_size); +/** + * struct zstd_sequence - a sequence of literals or a match + * + * @offset: The offset of the match + * @litLength: The literal length of the sequence + * @matchLength: The match length of the sequence + * @rep: Represents which repeat offset is used + */ +typedef ZSTD_Sequence zstd_sequence; + +/** + * zstd_compress_sequences_and_literals() - compress an array of zstd_sequence and literals + * + * @cctx: The zstd compression context. + * @dst: The buffer to compress the data into. + * @dst_capacity: The size of the destination buffer. + * @in_seqs: The array of zstd_sequence to compress. + * @in_seqs_size: The number of sequences in in_seqs. + * @literals: The literals associated to the sequences to be compressed. + * @lit_size: The size of the literals in the literals buffer. + * @lit_capacity: The size of the literals buffer. + * @decompressed_size: The size of the input data + * + * Return: The compressed size or an error, which can be checked using + * zstd_is_error(). + */ +size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity, + const zstd_sequence *in_seqs, size_t in_seqs_size, + const void* literals, size_t lit_size, size_t lit_capacity, + size_t decompressed_size); + #endif /* LINUX_ZSTD_H */ diff --git a/contrib/linux-kernel/mem.h b/contrib/linux-kernel/mem.h index 1d9cc03924c..d9bd752fe17 100644 --- a/contrib/linux-kernel/mem.h +++ b/contrib/linux-kernel/mem.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -15,7 +15,7 @@ /*-**************************************** * Dependencies ******************************************/ -#include <asm/unaligned.h> /* get_unaligned, put_unaligned* */ +#include <linux/unaligned.h> /* get_unaligned, put_unaligned* */ #include <linux/compiler.h> /* inline */ #include <linux/swab.h> /* swab32, swab64 */ #include <linux/types.h> /* size_t, ptrdiff_t */ @@ -24,6 +24,7 @@ /*-**************************************** * Compiler specifics ******************************************/ +#undef MEM_STATIC /* may be already defined from common/compiler.h */ #define MEM_STATIC static inline /*-************************************************************** diff --git a/contrib/linux-kernel/test/Makefile b/contrib/linux-kernel/test/Makefile index be82b3fbac8..67b55e66504 100644 --- a/contrib/linux-kernel/test/Makefile +++ b/contrib/linux-kernel/test/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -45,4 +45,5 @@ clean: $(RM) -f $(LINUX_ZSTDLIB)/*.o $(RM) -f $(LINUX_ZSTDLIB)/**/*.o $(RM) -f *.o *.a + $(RM) -f static_test $(RM) -f test diff --git a/contrib/linux-kernel/test/include/linux/compiler.h b/contrib/linux-kernel/test/include/linux/compiler.h index de43edb695c..988ce4a20ff 100644 --- a/contrib/linux-kernel/test/include/linux/compiler.h +++ b/contrib/linux-kernel/test/include/linux/compiler.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/errno.h b/contrib/linux-kernel/test/include/linux/errno.h index b2475225723..b4bdcba0ea6 100644 --- a/contrib/linux-kernel/test/include/linux/errno.h +++ b/contrib/linux-kernel/test/include/linux/errno.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/kernel.h b/contrib/linux-kernel/test/include/linux/kernel.h index 1f702abac55..a4d791cd1d5 100644 --- a/contrib/linux-kernel/test/include/linux/kernel.h +++ b/contrib/linux-kernel/test/include/linux/kernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/limits.h b/contrib/linux-kernel/test/include/linux/limits.h index db9c0990488..574aa7b343f 100644 --- a/contrib/linux-kernel/test/include/linux/limits.h +++ b/contrib/linux-kernel/test/include/linux/limits.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/math64.h b/contrib/linux-kernel/test/include/linux/math64.h index 8eefa2d5c9d..7f6713e73bc 100644 --- a/contrib/linux-kernel/test/include/linux/math64.h +++ b/contrib/linux-kernel/test/include/linux/math64.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. 
+ * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/module.h b/contrib/linux-kernel/test/include/linux/module.h index be6d20daea2..06ef56f9ebe 100644 --- a/contrib/linux-kernel/test/include/linux/module.h +++ b/contrib/linux-kernel/test/include/linux/module.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,6 +12,8 @@ #define EXPORT_SYMBOL(symbol) \ void* __##symbol = symbol +#define EXPORT_SYMBOL_GPL(symbol) \ + void* __##symbol = symbol #define MODULE_LICENSE(license) #define MODULE_DESCRIPTION(description) diff --git a/contrib/linux-kernel/test/include/linux/printk.h b/contrib/linux-kernel/test/include/linux/printk.h index eab08e0c4cc..92a25278e71 100644 --- a/contrib/linux-kernel/test/include/linux/printk.h +++ b/contrib/linux-kernel/test/include/linux/printk.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/stddef.h b/contrib/linux-kernel/test/include/linux/stddef.h index 8538eb3e40c..15c7408fcdf 100644 --- a/contrib/linux-kernel/test/include/linux/stddef.h +++ b/contrib/linux-kernel/test/include/linux/stddef.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/swab.h b/contrib/linux-kernel/test/include/linux/swab.h index 783046b42d1..2b48b434c97 100644 --- a/contrib/linux-kernel/test/include/linux/swab.h +++ b/contrib/linux-kernel/test/include/linux/swab.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/linux/types.h b/contrib/linux-kernel/test/include/linux/types.h index 459a4570060..b413db6f983 100644 --- a/contrib/linux-kernel/test/include/linux/types.h +++ b/contrib/linux-kernel/test/include/linux/types.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/include/asm/unaligned.h b/contrib/linux-kernel/test/include/linux/unaligned.h similarity index 100% rename from contrib/linux-kernel/test/include/asm/unaligned.h rename to contrib/linux-kernel/test/include/linux/unaligned.h diff --git a/contrib/linux-kernel/test/include/linux/xxhash.h b/contrib/linux-kernel/test/include/linux/xxhash.h index 7e92a706e7e..f993eb9eed1 100644 --- a/contrib/linux-kernel/test/include/linux/xxhash.h +++ b/contrib/linux-kernel/test/include/linux/xxhash.h @@ -2,7 +2,7 @@ * xxHash - Extremely Fast Hash algorithm * Copyright (C) 2012-2016, Yann Collet. 
* - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are @@ -260,7 +260,7 @@ XXH_API void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state * xxHash - Extremely Fast Hash algorithm * Copyright (C) 2012-2016, Yann Collet. * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are @@ -296,7 +296,7 @@ XXH_API void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state * - xxHash source repository: https://github.com/Cyan4973/xxHash */ -#include +#include #include #include #include diff --git a/contrib/linux-kernel/test/static_test.c b/contrib/linux-kernel/test/static_test.c index d2b8b5a322c..ba4a420d417 100644 --- a/contrib/linux-kernel/test/static_test.c +++ b/contrib/linux-kernel/test/static_test.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/linux-kernel/test/test.c b/contrib/linux-kernel/test/test.c index 6cd1730bb3a..0f4ba3f45ef 100644 --- a/contrib/linux-kernel/test/test.c +++ b/contrib/linux-kernel/test/test.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -186,11 +186,14 @@ static void __attribute__((noinline)) use(void *x) { asm volatile("" : "+r"(x)); } +static void __attribute__((noinline)) fill_stack(void) { + memset(g_stack, 0x33, 8192); +} + static void __attribute__((noinline)) set_stack(void) { char stack[8192]; g_stack = stack; - memset(g_stack, 0x33, 8192); use(g_stack); } @@ -208,6 +211,7 @@ static void __attribute__((noinline)) check_stack(void) { static void test_stack_usage(test_data_t const *data) { set_stack(); + fill_stack(); test_f2fs(); test_btrfs(data); test_decompress_unzstd(data); diff --git a/contrib/linux-kernel/zstd_common_module.c b/contrib/linux-kernel/zstd_common_module.c new file mode 100644 index 00000000000..466828e3575 --- /dev/null +++ b/contrib/linux-kernel/zstd_common_module.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include + +#include "common/huf.h" +#include "common/fse.h" +#include "common/zstd_internal.h" + +// Export symbols shared by compress and decompress into a common module + +#undef ZSTD_isError /* defined within zstd_internal.h */ +EXPORT_SYMBOL_GPL(FSE_readNCount); +EXPORT_SYMBOL_GPL(HUF_readStats); +EXPORT_SYMBOL_GPL(HUF_readStats_wksp); +EXPORT_SYMBOL_GPL(ZSTD_isError); +EXPORT_SYMBOL_GPL(ZSTD_getErrorName); +EXPORT_SYMBOL_GPL(ZSTD_getErrorCode); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Zstd Common"); diff --git a/contrib/linux-kernel/zstd_compress_module.c b/contrib/linux-kernel/zstd_compress_module.c index 04e1b5c01d9..7651b53551c 100644 --- a/contrib/linux-kernel/zstd_compress_module.c +++ b/contrib/linux-kernel/zstd_compress_module.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,6 +16,7 @@ #include "common/zstd_deps.h" #include "common/zstd_internal.h" +#include "compress/zstd_compress_internal.h" #define ZSTD_FORWARD_IF_ERR(ret) \ do { \ @@ -66,6 +67,12 @@ int zstd_max_clevel(void) } EXPORT_SYMBOL(zstd_max_clevel); +int zstd_default_clevel(void) +{ + return ZSTD_defaultCLevel(); +} +EXPORT_SYMBOL(zstd_default_clevel); + size_t zstd_compress_bound(size_t src_size) { return ZSTD_compressBound(src_size); @@ -79,12 +86,71 @@ zstd_parameters zstd_get_params(int level, } EXPORT_SYMBOL(zstd_get_params); +zstd_compression_parameters zstd_get_cparams(int level, + unsigned long long estimated_src_size, size_t dict_size) +{ + return ZSTD_getCParams(level, estimated_src_size, dict_size); +} +EXPORT_SYMBOL(zstd_get_cparams); + +size_t zstd_cctx_set_param(zstd_cctx *cctx, ZSTD_cParameter param, int value) +{ + return ZSTD_CCtx_setParameter(cctx, param, value); +} +EXPORT_SYMBOL(zstd_cctx_set_param); + size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams) { return ZSTD_estimateCCtxSize_usingCParams(*cparams); } EXPORT_SYMBOL(zstd_cctx_workspace_bound); +// Used by zstd_cctx_workspace_bound_with_ext_seq_prod() +static size_t dummy_external_sequence_producer( + void *sequenceProducerState, + ZSTD_Sequence *outSeqs, size_t outSeqsCapacity, + const void *src, size_t srcSize, + const void *dict, size_t dictSize, + int compressionLevel, + size_t windowSize) +{ + (void)sequenceProducerState; + (void)outSeqs; (void)outSeqsCapacity; + (void)src; (void)srcSize; + (void)dict; (void)dictSize; + (void)compressionLevel; + (void)windowSize; + return ZSTD_SEQUENCE_PRODUCER_ERROR; +} + +static void init_cctx_params_from_compress_params( + ZSTD_CCtx_params *cctx_params, + const zstd_compression_parameters *compress_params) +{ + ZSTD_parameters zstd_params; + memset(&zstd_params, 0, sizeof(zstd_params)); + zstd_params.cParams = *compress_params; + ZSTD_CCtxParams_init_advanced(cctx_params, zstd_params); +} + +size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params) +{ + ZSTD_CCtx_params cctx_params; + init_cctx_params_from_compress_params(&cctx_params, compress_params); + ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer); + return ZSTD_estimateCCtxSize_usingCCtxParams(&cctx_params); +} +EXPORT_SYMBOL(zstd_cctx_workspace_bound_with_ext_seq_prod); + +size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params) +{ + 
ZSTD_CCtx_params cctx_params; + init_cctx_params_from_compress_params(&cctx_params, compress_params); + ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer); + return ZSTD_estimateCStreamSize_usingCCtxParams(&cctx_params); +} +EXPORT_SYMBOL(zstd_cstream_workspace_bound_with_ext_seq_prod); + zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size) { if (workspace == NULL) @@ -93,6 +159,33 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size) } EXPORT_SYMBOL(zstd_init_cctx); +zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem) +{ + return ZSTD_createCCtx_advanced(custom_mem); +} +EXPORT_SYMBOL(zstd_create_cctx_advanced); + +size_t zstd_free_cctx(zstd_cctx *cctx) +{ + return ZSTD_freeCCtx(cctx); +} +EXPORT_SYMBOL(zstd_free_cctx); + +zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size, + zstd_compression_parameters cparams, + zstd_custom_mem custom_mem) +{ + return ZSTD_createCDict_advanced(dict, dict_size, ZSTD_dlm_byRef, + ZSTD_dct_auto, cparams, custom_mem); +} +EXPORT_SYMBOL(zstd_create_cdict_byreference); + +size_t zstd_free_cdict(zstd_cdict *cdict) +{ + return ZSTD_freeCDict(cdict); +} +EXPORT_SYMBOL(zstd_free_cdict); + size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity, const void *src, size_t src_size, const zstd_parameters *parameters) { @@ -101,6 +194,15 @@ size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity, } EXPORT_SYMBOL(zstd_compress_cctx); +size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst, + size_t dst_capacity, const void *src, size_t src_size, + const ZSTD_CDict *cdict) +{ + return ZSTD_compress_usingCDict(cctx, dst, dst_capacity, + src, src_size, cdict); +} +EXPORT_SYMBOL(zstd_compress_using_cdict); + size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams) { return ZSTD_estimateCStreamSize_usingCParams(*cparams); @@ -160,5 +262,25 @@ size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output) } EXPORT_SYMBOL(zstd_end_stream); +void zstd_register_sequence_producer( + zstd_cctx *cctx, + void* sequence_producer_state, + zstd_sequence_producer_f sequence_producer +) { + ZSTD_registerSequenceProducer(cctx, sequence_producer_state, sequence_producer); +} +EXPORT_SYMBOL(zstd_register_sequence_producer); + +size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity, + const zstd_sequence *in_seqs, size_t in_seqs_size, + const void* literals, size_t lit_size, size_t lit_capacity, + size_t decompressed_size) +{ + return ZSTD_compressSequencesAndLiterals(cctx, dst, dst_capacity, in_seqs, + in_seqs_size, literals, lit_size, + lit_capacity, decompressed_size); +} +EXPORT_SYMBOL(zstd_compress_sequences_and_literals); + MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Zstd Compressor"); diff --git a/contrib/linux-kernel/zstd_decompress_module.c b/contrib/linux-kernel/zstd_decompress_module.c index f4ed952ed48..0ae819f0c92 100644 --- a/contrib/linux-kernel/zstd_decompress_module.c +++ b/contrib/linux-kernel/zstd_decompress_module.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -44,6 +44,33 @@ size_t zstd_dctx_workspace_bound(void) } EXPORT_SYMBOL(zstd_dctx_workspace_bound); +zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem) +{ + return ZSTD_createDCtx_advanced(custom_mem); +} +EXPORT_SYMBOL(zstd_create_dctx_advanced); + +size_t zstd_free_dctx(zstd_dctx *dctx) +{ + return ZSTD_freeDCtx(dctx); +} +EXPORT_SYMBOL(zstd_free_dctx); + +zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size, + zstd_custom_mem custom_mem) +{ + return ZSTD_createDDict_advanced(dict, dict_size, ZSTD_dlm_byRef, + ZSTD_dct_auto, custom_mem); + +} +EXPORT_SYMBOL(zstd_create_ddict_byreference); + +size_t zstd_free_ddict(zstd_ddict *ddict) +{ + return ZSTD_freeDDict(ddict); +} +EXPORT_SYMBOL(zstd_free_ddict); + zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size) { if (workspace == NULL) @@ -59,6 +86,15 @@ size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity, } EXPORT_SYMBOL(zstd_decompress_dctx); +size_t zstd_decompress_using_ddict(zstd_dctx *dctx, + void *dst, size_t dst_capacity, const void* src, size_t src_size, + const zstd_ddict* ddict) +{ + return ZSTD_decompress_usingDDict(dctx, dst, dst_capacity, src, + src_size, ddict); +} +EXPORT_SYMBOL(zstd_decompress_using_ddict); + size_t zstd_dstream_workspace_bound(size_t max_window_size) { return ZSTD_estimateDStreamSize(max_window_size); @@ -77,7 +113,7 @@ EXPORT_SYMBOL(zstd_init_dstream); size_t zstd_reset_dstream(zstd_dstream *dstream) { - return ZSTD_resetDStream(dstream); + return ZSTD_DCtx_reset(dstream, ZSTD_reset_session_only); } EXPORT_SYMBOL(zstd_reset_dstream); diff --git a/contrib/linux-kernel/zstd_deps.h b/contrib/linux-kernel/zstd_deps.h index 7a5bf44839c..f931f7d0e29 100644 --- a/contrib/linux-kernel/zstd_deps.h +++ b/contrib/linux-kernel/zstd_deps.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -84,7 +84,7 @@ static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) { #include -#define assert(x) WARN_ON((x)) +#define assert(x) WARN_ON(!(x)) #endif /* ZSTD_DEPS_ASSERT */ #endif /* ZSTD_DEPS_NEED_ASSERT */ @@ -115,11 +115,7 @@ static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) { #ifndef ZSTD_DEPS_STDINT #define ZSTD_DEPS_STDINT -/* - * The Linux Kernel doesn't provide intptr_t, only uintptr_t, which - * is an unsigned long. - */ -typedef long intptr_t; +/* intptr_t already provided by ZSTD_DEPS_COMMON */ #endif /* ZSTD_DEPS_STDINT */ #endif /* ZSTD_DEPS_NEED_STDINT */ diff --git a/contrib/match_finders/zstd_edist.c b/contrib/match_finders/zstd_edist.c index d63a7cf8d4f..d685cdd9e2f 100644 --- a/contrib/match_finders/zstd_edist.c +++ b/contrib/match_finders/zstd_edist.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/match_finders/zstd_edist.h b/contrib/match_finders/zstd_edist.h index a947649b2c8..c739e2abc51 100644 --- a/contrib/match_finders/zstd_edist.h +++ b/contrib/match_finders/zstd_edist.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. 
* All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/premake/zstd.lua b/contrib/premake/zstd.lua index df1ace3ee8e..f3fd5b2935a 100644 --- a/contrib/premake/zstd.lua +++ b/contrib/premake/zstd.lua @@ -2,6 +2,7 @@ -- Basic usage: project_zstd(ZSTD_DIR) function project_zstd(dir, compression, decompression, deprecated, dictbuilder, legacy) + if string.sub(dir, -1) ~= '/' then dir = dir .. '/' end if compression == nil then compression = true end if decompression == nil then decompression = true end if deprecated == nil then deprecated = false end diff --git a/contrib/pzstd/ErrorHolder.h b/contrib/pzstd/ErrorHolder.h index 829651c5961..2c2797edea0 100644 --- a/contrib/pzstd/ErrorHolder.h +++ b/contrib/pzstd/ErrorHolder.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/Logging.h b/contrib/pzstd/Logging.h index beb160b647d..aa73a976b80 100644 --- a/contrib/pzstd/Logging.h +++ b/contrib/pzstd/Logging.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -10,6 +10,7 @@ #include #include +#include namespace pzstd { diff --git a/contrib/pzstd/Makefile b/contrib/pzstd/Makefile index 3d930cca93f..e4b3e8a21f2 100644 --- a/contrib/pzstd/Makefile +++ b/contrib/pzstd/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -10,7 +10,7 @@ # Standard variables for installation DESTDIR ?= PREFIX ?= /usr/local -BINDIR := $(DESTDIR)$(PREFIX)/bin +BINDIR := $(PREFIX)/bin ZSTDDIR = ../../lib PROGDIR = ../../programs @@ -37,10 +37,13 @@ CFLAGS += -Wno-deprecated-declarations PZSTD_INC = -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(PROGDIR) -I. GTEST_INC = -isystem googletest/googletest/include +# Set the minimum C++ standard required by gtest +PZSTD_CXX_STD := -std=c++14 + PZSTD_CPPFLAGS = $(PZSTD_INC) PZSTD_CCXXFLAGS = PZSTD_CFLAGS = $(PZSTD_CCXXFLAGS) -PZSTD_CXXFLAGS = $(PZSTD_CCXXFLAGS) -std=c++11 +PZSTD_CXXFLAGS = $(PZSTD_CCXXFLAGS) $(PZSTD_CXX_STD) PZSTD_LDFLAGS = EXTRA_FLAGS = ALL_CFLAGS = $(EXTRA_FLAGS) $(CPPFLAGS) $(PZSTD_CPPFLAGS) $(CFLAGS) $(PZSTD_CFLAGS) @@ -106,12 +109,12 @@ check: .PHONY: install install: PZSTD_CPPFLAGS += -DNDEBUG install: pzstd$(EXT) - install -d -m 755 $(BINDIR)/ - install -m 755 pzstd$(EXT) $(BINDIR)/pzstd$(EXT) + install -d -m 755 $(DESTDIR)$(BINDIR)/ + install -m 755 pzstd$(EXT) $(DESTDIR)$(BINDIR)/pzstd$(EXT) .PHONY: uninstall uninstall: - $(RM) $(BINDIR)/pzstd$(EXT) + $(RM) $(DESTDIR)$(BINDIR)/pzstd$(EXT) # Targets for many different builds .PHONY: all diff --git a/contrib/pzstd/Options.cpp b/contrib/pzstd/Options.cpp index 90f9d571fcf..90841b9bb04 100644 --- a/contrib/pzstd/Options.cpp +++ b/contrib/pzstd/Options.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -322,7 +322,7 @@ Options::Status Options::parse(int argc, const char **argv) { g_utilDisplayLevel = verbosity; // Remove local input files that are symbolic links if (!followLinks) { - std::remove_if(localInputFiles.begin(), localInputFiles.end(), + localInputFiles.erase(std::remove_if(localInputFiles.begin(), localInputFiles.end(), [&](const char *path) { bool isLink = UTIL_isLink(path); if (isLink && verbosity >= 2) { @@ -332,7 +332,7 @@ Options::Status Options::parse(int argc, const char **argv) { path); } return isLink; - }); + }), localInputFiles.end()); } // Translate input files/directories into files to (de)compress diff --git a/contrib/pzstd/Options.h b/contrib/pzstd/Options.h index 924543abfd5..92c18a350cf 100644 --- a/contrib/pzstd/Options.h +++ b/contrib/pzstd/Options.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/Pzstd.cpp b/contrib/pzstd/Pzstd.cpp index 2c09bda7a2d..048a006b3b3 100644 --- a/contrib/pzstd/Pzstd.cpp +++ b/contrib/pzstd/Pzstd.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -10,11 +10,13 @@ #include "Pzstd.h" #include "SkippableFrame.h" #include "utils/FileSystem.h" +#include "utils/Portability.h" #include "utils/Range.h" #include "utils/ScopeGuard.h" #include "utils/ThreadPool.h" #include "utils/WorkQueue.h" +#include #include #include #include @@ -267,7 +269,10 @@ static void compress( std::shared_ptr out, size_t maxInputSize) { auto& errorHolder = state.errorHolder; - auto guard = makeScopeGuard([&] { out->finish(); }); + auto guard = makeScopeGuard([&] { + in->finish(); + out->finish(); + }); // Initialize the CCtx auto ctx = state.cStreamPool->get(); if (!errorHolder.check(ctx != nullptr, "Failed to allocate ZSTD_CStream")) { @@ -336,6 +341,10 @@ static size_t calculateStep( const ZSTD_parameters ¶ms) { (void)size; (void)numThreads; + // Not validated to work correctly for window logs > 23. + // It will definitely fail if windowLog + 2 is >= 4GB because + // the skippable frame can only store sizes up to 4GB. + assert(params.cParams.windowLog <= 23); return size_t{1} << (params.cParams.windowLog + 2); } @@ -425,7 +434,10 @@ static void decompress( std::shared_ptr in, std::shared_ptr out) { auto& errorHolder = state.errorHolder; - auto guard = makeScopeGuard([&] { out->finish(); }); + auto guard = makeScopeGuard([&] { + in->finish(); + out->finish(); + }); // Initialize the DCtx auto ctx = state.dStreamPool->get(); if (!errorHolder.check(ctx != nullptr, "Failed to allocate ZSTD_DStream")) { @@ -572,6 +584,7 @@ std::uint64_t writeFile( FILE* outputFd, bool decompress) { auto& errorHolder = state.errorHolder; + auto outsFinishGuard = makeScopeGuard([&outs] { outs.finish(); }); auto lineClearGuard = makeScopeGuard([&state] { state.log.clear(kLogInfo); }); @@ -579,6 +592,7 @@ std::uint64_t writeFile( std::shared_ptr out; // Grab the output queue for each decompression job (in order). 
while (outs.pop(out)) { + auto outFinishGuard = makeScopeGuard([&out] { out->finish(); }); if (errorHolder.hasError()) { continue; } @@ -587,7 +601,8 @@ std::uint64_t writeFile( // start writing before compression is done because we need to know the // compressed size. // Wait for the compressed size to be available and write skippable frame - SkippableFrame frame(out->size()); + assert(uint64_t(out->size()) < uint64_t(1) << 32); + SkippableFrame frame(uint32_t(out->size())); if (!writeData(frame.data(), outputFd)) { errorHolder.setError("Failed to write output"); return bytesWritten; diff --git a/contrib/pzstd/Pzstd.h b/contrib/pzstd/Pzstd.h index c667c887d7b..3645e594268 100644 --- a/contrib/pzstd/Pzstd.h +++ b/contrib/pzstd/Pzstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/README.md b/contrib/pzstd/README.md index 84d94581583..bc8f831f82c 100644 --- a/contrib/pzstd/README.md +++ b/contrib/pzstd/README.md @@ -31,7 +31,7 @@ If this number is not suitable, during compilation you can define `PZSTD_NUM_THR ## Benchmarks -As a reference, PZstandard and Pigz were compared on an Intel Core i7 @ 3.1 GHz, each using 4 threads, with the [Silesia compression corpus](http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia). +As a reference, PZstandard and Pigz were compared on an Intel Core i7 @ 3.1 GHz, each using 4 threads, with the [Silesia compression corpus](https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia). Compression Speed vs Ratio with 4 Threads | Decompression Speed with 4 Threads ------------------------------------------|----------------------------------- diff --git a/contrib/pzstd/SkippableFrame.cpp b/contrib/pzstd/SkippableFrame.cpp index 769866dfc81..3bea4eb65bf 100644 --- a/contrib/pzstd/SkippableFrame.cpp +++ b/contrib/pzstd/SkippableFrame.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/SkippableFrame.h b/contrib/pzstd/SkippableFrame.h index 60deed0405b..817415e9231 100644 --- a/contrib/pzstd/SkippableFrame.h +++ b/contrib/pzstd/SkippableFrame.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/main.cpp b/contrib/pzstd/main.cpp index b93f043b16b..422b4a56a71 100644 --- a/contrib/pzstd/main.cpp +++ b/contrib/pzstd/main.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/OptionsTest.cpp b/contrib/pzstd/test/OptionsTest.cpp index e601148255d..91e39750d0c 100644 --- a/contrib/pzstd/test/OptionsTest.cpp +++ b/contrib/pzstd/test/OptionsTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/PzstdTest.cpp b/contrib/pzstd/test/PzstdTest.cpp index 5c7d6631080..3249f860af0 100644 --- a/contrib/pzstd/test/PzstdTest.cpp +++ b/contrib/pzstd/test/PzstdTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -7,9 +7,7 @@ * in the COPYING file in the root directory of this source tree). */ #include "Pzstd.h" -extern "C" { #include "datagen.h" -} #include "test/RoundTrip.h" #include "utils/ScopeGuard.h" diff --git a/contrib/pzstd/test/RoundTrip.h b/contrib/pzstd/test/RoundTrip.h index c6364ecb422..f777622a393 100644 --- a/contrib/pzstd/test/RoundTrip.h +++ b/contrib/pzstd/test/RoundTrip.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/test/RoundTripTest.cpp b/contrib/pzstd/test/RoundTripTest.cpp index 36af0673ae6..27c5028e880 100644 --- a/contrib/pzstd/test/RoundTripTest.cpp +++ b/contrib/pzstd/test/RoundTripTest.cpp @@ -1,14 +1,12 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). */ -extern "C" { #include "datagen.h" -} #include "Options.h" #include "test/RoundTrip.h" #include "utils/ScopeGuard.h" diff --git a/contrib/pzstd/utils/Buffer.h b/contrib/pzstd/utils/Buffer.h index d17ad2f2cf1..a85f770ba18 100644 --- a/contrib/pzstd/utils/Buffer.h +++ b/contrib/pzstd/utils/Buffer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/FileSystem.h b/contrib/pzstd/utils/FileSystem.h index 3cfbe86e507..8d57d05f0fa 100644 --- a/contrib/pzstd/utils/FileSystem.h +++ b/contrib/pzstd/utils/FileSystem.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,16 +8,18 @@ */ #pragma once +#include "utils/Portability.h" #include "utils/Range.h" #include #include #include +#include #include // A small subset of `std::filesystem`. // `std::filesystem` should be a drop in replacement. -// See http://en.cppreference.com/w/cpp/filesystem for documentation. +// See https://en.cppreference.com/w/cpp/filesystem for documentation. 
namespace pzstd { @@ -28,7 +30,7 @@ typedef struct ::_stat64 file_status; typedef struct ::stat file_status; #endif -/// http://en.cppreference.com/w/cpp/filesystem/status +/// https://en.cppreference.com/w/cpp/filesystem/status inline file_status status(StringPiece path, std::error_code& ec) noexcept { file_status status; #if defined(_MSC_VER) @@ -44,7 +46,7 @@ inline file_status status(StringPiece path, std::error_code& ec) noexcept { return status; } -/// http://en.cppreference.com/w/cpp/filesystem/is_regular_file +/// https://en.cppreference.com/w/cpp/filesystem/is_regular_file inline bool is_regular_file(file_status status) noexcept { #if defined(S_ISREG) return S_ISREG(status.st_mode); @@ -55,12 +57,12 @@ inline bool is_regular_file(file_status status) noexcept { #endif } -/// http://en.cppreference.com/w/cpp/filesystem/is_regular_file +/// https://en.cppreference.com/w/cpp/filesystem/is_regular_file inline bool is_regular_file(StringPiece path, std::error_code& ec) noexcept { return is_regular_file(status(path, ec)); } -/// http://en.cppreference.com/w/cpp/filesystem/is_directory +/// https://en.cppreference.com/w/cpp/filesystem/is_directory inline bool is_directory(file_status status) noexcept { #if defined(S_ISDIR) return S_ISDIR(status.st_mode); @@ -71,22 +73,22 @@ inline bool is_directory(file_status status) noexcept { #endif } -/// http://en.cppreference.com/w/cpp/filesystem/is_directory +/// https://en.cppreference.com/w/cpp/filesystem/is_directory inline bool is_directory(StringPiece path, std::error_code& ec) noexcept { return is_directory(status(path, ec)); } -/// http://en.cppreference.com/w/cpp/filesystem/file_size +/// https://en.cppreference.com/w/cpp/filesystem/file_size inline std::uintmax_t file_size( StringPiece path, std::error_code& ec) noexcept { auto stat = status(path, ec); if (ec) { - return -1; + return std::numeric_limits::max(); } if (!is_regular_file(stat)) { ec.assign(ENOTSUP, std::generic_category()); - return -1; + return std::numeric_limits::max(); } ec.clear(); return stat.st_size; diff --git a/contrib/pzstd/utils/Likely.h b/contrib/pzstd/utils/Likely.h index 7cea8da2771..52243a64eab 100644 --- a/contrib/pzstd/utils/Likely.h +++ b/contrib/pzstd/utils/Likely.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/Portability.h b/contrib/pzstd/utils/Portability.h new file mode 100644 index 00000000000..ef1f86e51f6 --- /dev/null +++ b/contrib/pzstd/utils/Portability.h @@ -0,0 +1,16 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#pragma once + +#include + +// Required for windows, which defines min/max, but we want the std:: version. +#undef min +#undef max diff --git a/contrib/pzstd/utils/Range.h b/contrib/pzstd/utils/Range.h index 6a850ad4eaa..0fd8f9f8655 100644 --- a/contrib/pzstd/utils/Range.h +++ b/contrib/pzstd/utils/Range.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -14,7 +14,9 @@ #pragma once #include "utils/Likely.h" +#include "utils/Portability.h" +#include #include #include #include diff --git a/contrib/pzstd/utils/ResourcePool.h b/contrib/pzstd/utils/ResourcePool.h index 8dfcdd76590..7c4bb623580 100644 --- a/contrib/pzstd/utils/ResourcePool.h +++ b/contrib/pzstd/utils/ResourcePool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/ScopeGuard.h b/contrib/pzstd/utils/ScopeGuard.h index c26f911bf3c..911fd984214 100644 --- a/contrib/pzstd/utils/ScopeGuard.h +++ b/contrib/pzstd/utils/ScopeGuard.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/ThreadPool.h b/contrib/pzstd/utils/ThreadPool.h index 8ece8e0da4e..a087d7c1cff 100644 --- a/contrib/pzstd/utils/ThreadPool.h +++ b/contrib/pzstd/utils/ThreadPool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/WorkQueue.h b/contrib/pzstd/utils/WorkQueue.h index 1d14d922c64..07842e59817 100644 --- a/contrib/pzstd/utils/WorkQueue.h +++ b/contrib/pzstd/utils/WorkQueue.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -115,13 +115,14 @@ class WorkQueue { } /** - * Promise that `push()` won't be called again, so once the queue is empty - * there will never any more work. + * Promise that either the reader side or the writer side is done. + * If the writer is done, `push()` won't be called again, so once the queue + * is empty there will never be any more work. If the reader is done, `pop()` + * won't be called again, so further items pushed will just be ignored. */ void finish() { { std::lock_guard lock(mutex_); - assert(!done_); done_ = true; } readerCv_.notify_all(); diff --git a/contrib/pzstd/utils/test/BufferTest.cpp b/contrib/pzstd/utils/test/BufferTest.cpp index fbba74e8262..58bf08dcd69 100644 --- a/contrib/pzstd/utils/test/BufferTest.cpp +++ b/contrib/pzstd/utils/test/BufferTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/RangeTest.cpp b/contrib/pzstd/utils/test/RangeTest.cpp index 755b50fa6e8..8b7dee27193 100644 --- a/contrib/pzstd/utils/test/RangeTest.cpp +++ b/contrib/pzstd/utils/test/RangeTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ResourcePoolTest.cpp b/contrib/pzstd/utils/test/ResourcePoolTest.cpp index 6fe145180be..750ee084b07 100644 --- a/contrib/pzstd/utils/test/ResourcePoolTest.cpp +++ b/contrib/pzstd/utils/test/ResourcePoolTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ScopeGuardTest.cpp b/contrib/pzstd/utils/test/ScopeGuardTest.cpp index 7bc624da79b..0f77cdf38b7 100644 --- a/contrib/pzstd/utils/test/ScopeGuardTest.cpp +++ b/contrib/pzstd/utils/test/ScopeGuardTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/ThreadPoolTest.cpp b/contrib/pzstd/utils/test/ThreadPoolTest.cpp index 703fd4c9ca1..a01052e605f 100644 --- a/contrib/pzstd/utils/test/ThreadPoolTest.cpp +++ b/contrib/pzstd/utils/test/ThreadPoolTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/pzstd/utils/test/WorkQueueTest.cpp b/contrib/pzstd/utils/test/WorkQueueTest.cpp index 14cf77304f2..16600bb6031 100644 --- a/contrib/pzstd/utils/test/WorkQueueTest.cpp +++ b/contrib/pzstd/utils/test/WorkQueueTest.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/recovery/Makefile b/contrib/recovery/Makefile index 9a9f4f2e81d..be6ea4b0e9c 100644 --- a/contrib/recovery/Makefile +++ b/contrib/recovery/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2019-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/recovery/recover_directory.c b/contrib/recovery/recover_directory.c index 13f83fd106b..b9bd7ab4996 100644 --- a/contrib/recovery/recover_directory.c +++ b/contrib/recovery/recover_directory.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/README.md b/contrib/seekable_format/README.md new file mode 100644 index 00000000000..fedf96bab4d --- /dev/null +++ b/contrib/seekable_format/README.md @@ -0,0 +1,42 @@ +# Zstandard Seekable Format + +The seekable format splits compressed data into a series of independent "frames", +each compressed individually, +so that decompression of a section in the middle of an archive +only requires zstd to decompress at most a frame's worth of extra data, +instead of the entire archive. + +The frames are appended, so that the decompression of the entire payload +still regenerates the original content, using any compliant zstd decoder. 
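[Editor's note: as a concrete illustration of the above, and not part of this patch: retrieving a segment goes through the seekable API declared in `zstd_seekable.h` (which appears later in this diff). A minimal sketch, assuming the seekable-format objects (`zstdseek_compress.c`, `zstdseek_decompress.c`) are compiled in; the helper name `readSegment_orDie` and the assert-based error handling are illustrative only:]

```c
/* Sketch: decompress `len` bytes starting at uncompressed offset `off`
 * from a seekable archive, touching only the frames that overlap the
 * requested range. */
#include <assert.h>
#include <stdio.h>
#include "zstd_seekable.h"   /* includes zstd.h for ZSTD_isError() */

static void readSegment_orDie(const char* fname, void* dst, size_t len,
                              unsigned long long off)
{
    FILE* const fin = fopen(fname, "rb");
    ZSTD_seekable* const zs = ZSTD_seekable_create();
    size_t ret;
    assert(fin != NULL && zs != NULL);
    ret = ZSTD_seekable_initFile(zs, fin);          /* reads the jump table */
    assert(!ZSTD_isError(ret));
    ret = ZSTD_seekable_decompress(zs, dst, len, off);
    assert(!ZSTD_isError(ret));                     /* only the overlapping frames were decompressed */
    (void)ret;
    ZSTD_seekable_free(zs);
    fclose(fin);
}
```

The compression side is symmetric (`ZSTD_seekable_createCStream()` and friends), as the programs added under `contrib/seekable_format/examples` in this patch demonstrate.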
+ +On top of that, the seekable format generates a jump table, +which makes it possible to jump directly to the position of the relevant frame +when requesting only a segment of the data. +The jump table is simply ignored by zstd decoders unaware of the seekable format. + +The format is delivered with an API to create seekable archives +and to retrieve arbitrary segments inside the archive. + +### Maximum Frame Size parameter + +When creating a seekable archive, the main parameter is the maximum frame size. + +At compression time, the user can manually select the boundaries between segments, +but they don't have to: long segments will be automatically split +when larger than the selected maximum frame size. + +Small frame sizes reduce decompression cost when requesting small segments, +because the decoder will nonetheless have to decompress an entire frame +to recover just a single byte from it. + +A good rule of thumb is to select a maximum frame size roughly equivalent +to the expected access size, when it's known. +For example, if the application tends to request 4 KB blocks, +then it's a good idea to set a maximum frame size in the vicinity of 4 KB. + +But small frame sizes also reduce compression ratio, +and increase the cost of the jump table, +so there is a balance to find. + +In general, try to avoid really tiny frame sizes (< 1 KB), +which would have a large negative impact on compression ratio. diff --git a/contrib/seekable_format/examples/Makefile b/contrib/seekable_format/examples/Makefile index 9df6b75fb84..f15a4a15a80 100644 --- a/contrib/seekable_format/examples/Makefile +++ b/contrib/seekable_format/examples/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2017-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -11,6 +11,8 @@ ZSTDLIB_PATH = ../../../lib ZSTDLIB_NAME = libzstd.a +# Parallel tools only make sense against multi-threaded libzstd +ZSTDLIB_TARGET = $(ZSTDLIB_NAME)-mt ZSTDLIB = $(ZSTDLIB_PATH)/$(ZSTDLIB_NAME) CPPFLAGS += -DXXH_NAMESPACE=ZSTD_ -I../ -I../../../lib -I../../../lib/common @@ -28,7 +30,7 @@ all: seekable_compression seekable_decompression seekable_decompression_mem \ parallel_processing $(ZSTDLIB): - make -C $(ZSTDLIB_PATH) $(ZSTDLIB_NAME) + $(MAKE) -C $(ZSTDLIB_PATH) $(ZSTDLIB_TARGET) seekable_compression : seekable_compression.c $(SEEKABLE_OBJS) $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ diff --git a/contrib/seekable_format/examples/parallel_compression.c b/contrib/seekable_format/examples/parallel_compression.c index 4118b0ad762..f8a73a36b44 100644 --- a/contrib/seekable_format/examples/parallel_compression.c +++ b/contrib/seekable_format/examples/parallel_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -23,9 +23,11 @@ #include "xxhash.h" +#define ZSTD_MULTITHREAD 1 +#include "threading.h" #include "pool.h" // use zstd thread pool for demo -#include "zstd_seekable.h" +#include "../zstd_seekable.h" static void* malloc_orDie(size_t size) { @@ -72,127 +74,166 @@ static size_t fclose_orDie(FILE* file) exit(6); } -static void fseek_orDie(FILE* file, long int offset, int origin) -{ - if (!fseek(file, offset, origin)) { - if (!fflush(file)) return; - } - /* error */ - perror("fseek"); - exit(7); -} - -static long int ftell_orDie(FILE* file) -{ - long int off = ftell(file); - if (off != -1) return off; - /* error */ - perror("ftell"); - exit(8); -} +struct state { + FILE* fout; + ZSTD_pthread_mutex_t mutex; + size_t nextID; + struct job* pending; + ZSTD_frameLog* frameLog; + const int compressionLevel; +}; struct job { - const void* src; + size_t id; + struct job* next; + struct state* state; + + void* src; size_t srcSize; void* dst; size_t dstSize; unsigned checksum; - - int compressionLevel; - int done; }; -static void compressFrame(void* opaque) +static void addPending_inmutex(struct state* state, struct job* job) { - struct job* job = opaque; + struct job** p = &state->pending; + while (*p && (*p)->id < job->id) + p = &(*p)->next; + job->next = *p; + *p = job; +} - job->checksum = XXH64(job->src, job->srcSize, 0); +static void flushFrame(struct state* state, struct job* job) +{ + fwrite_orDie(job->dst, job->dstSize, state->fout); + free(job->dst); - size_t ret = ZSTD_compress(job->dst, job->dstSize, job->src, job->srcSize, job->compressionLevel); + size_t ret = ZSTD_seekable_logFrame(state->frameLog, (unsigned)job->dstSize, (unsigned)job->srcSize, job->checksum); if (ZSTD_isError(ret)) { - fprintf(stderr, "ZSTD_compress() error : %s \n", ZSTD_getErrorName(ret)); - exit(20); + fprintf(stderr, "ZSTD_seekable_logFrame() error : %s \n", ZSTD_getErrorName(ret)); + exit(12); } +} - job->dstSize = ret; - job->done = 1; +static void flushPending_inmutex(struct state* state) +{ + while (state->pending && state->pending->id == state->nextID) { + struct job* p = state->pending; + state->pending = p->next; + flushFrame(state, p); + free(p); + state->nextID++; + } } -static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize, int nbThreads) +static void finishFrame(struct job* job) { - POOL_ctx* pool = POOL_create(nbThreads, nbThreads); - if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); } + struct state *state = job->state; + ZSTD_pthread_mutex_lock(&state->mutex); + addPending_inmutex(state, job); + flushPending_inmutex(state); + ZSTD_pthread_mutex_unlock(&state->mutex); +} - FILE* const fin = fopen_orDie(fname, "rb"); - FILE* const fout = fopen_orDie(outName, "wb"); +static void compressFrame(void* opaque) +{ + struct job* job = opaque; - if (ZSTD_compressBound(frameSize) > 0xFFFFFFFFU) { fprintf(stderr, "Frame size too large \n"); exit(10); } - unsigned dstSize = ZSTD_compressBound(frameSize); + job->checksum = (unsigned)XXH64(job->src, job->srcSize, 0); + + size_t ret = ZSTD_compress(job->dst, job->dstSize, job->src, job->srcSize, job->state->compressionLevel); + if (ZSTD_isError(ret)) { + fprintf(stderr, "ZSTD_compress() error : %s \n", ZSTD_getErrorName(ret)); + exit(20); + } + job->dstSize = ret; + // No longer need + free(job->src); + job->src = NULL; + + finishFrame(job); +} - fseek_orDie(fin, 0, SEEK_END); - long int length = ftell_orDie(fin); - 
fseek_orDie(fin, 0, SEEK_SET); +static const char* createOutFilename_orDie(const char* filename) +{ + size_t const inL = strlen(filename); + size_t const outL = inL + 5; + void* outSpace = malloc_orDie(outL); + memset(outSpace, 0, outL); + strcat(outSpace, filename); + strcat(outSpace, ".zst"); + return (const char*)outSpace; +} - size_t numFrames = (length + frameSize - 1) / frameSize; +static void openInOut_orDie(const char* fname, FILE** fin, FILE** fout) { + if (strcmp(fname, "-") == 0) { + *fin = stdin; + *fout = stdout; + } else { + *fin = fopen_orDie(fname, "rb"); + const char* outName = createOutFilename_orDie(fname); + *fout = fopen_orDie(outName, "wb"); + } +} - struct job* jobs = malloc_orDie(sizeof(struct job) * numFrames); +static void compressFile_orDie(const char* fname, int cLevel, unsigned frameSize, size_t nbThreads) +{ + struct state state = { + .nextID = 0, + .pending = NULL, + .compressionLevel = cLevel, + }; + ZSTD_pthread_mutex_init(&state.mutex, NULL); + state.frameLog = ZSTD_seekable_createFrameLog(1); + if (state.frameLog == NULL) { fprintf(stderr, "ZSTD_seekable_createFrameLog() failed \n"); exit(11); } - size_t i; - for(i = 0; i < numFrames; i++) { - void* in = malloc_orDie(frameSize); - void* out = malloc_orDie(dstSize); + POOL_ctx* pool = POOL_create(nbThreads, nbThreads); + if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); } - size_t inSize = fread_orDie(in, frameSize, fin); + FILE* fin; + openInOut_orDie(fname, &fin, &state.fout); - jobs[i].src = in; - jobs[i].srcSize = inSize; - jobs[i].dst = out; - jobs[i].dstSize = dstSize; - jobs[i].compressionLevel = cLevel; - jobs[i].done = 0; - POOL_add(pool, compressFrame, &jobs[i]); + if (ZSTD_compressBound(frameSize) > 0xFFFFFFFFU) { fprintf(stderr, "Frame size too large \n"); exit(10); } + size_t dstSize = ZSTD_compressBound(frameSize); + + for (size_t id = 0; 1; id++) { + struct job* job = malloc_orDie(sizeof(struct job)); + job->id = id; + job->next = NULL; + job->state = &state; + job->src = malloc_orDie(frameSize); + job->dst = malloc_orDie(dstSize); + job->srcSize = fread_orDie(job->src, frameSize, fin); + job->dstSize = dstSize; + POOL_add(pool, compressFrame, job); + if (feof(fin)) + break; } - ZSTD_frameLog* fl = ZSTD_seekable_createFrameLog(1); - if (fl == NULL) { fprintf(stderr, "ZSTD_seekable_createFrameLog() failed \n"); exit(11); } - for (i = 0; i < numFrames; i++) { - while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */ - fwrite_orDie(jobs[i].dst, jobs[i].dstSize, fout); - free((void*)jobs[i].src); - free(jobs[i].dst); - - size_t ret = ZSTD_seekable_logFrame(fl, jobs[i].dstSize, jobs[i].srcSize, jobs[i].checksum); - if (ZSTD_isError(ret)) { fprintf(stderr, "ZSTD_seekable_logFrame() error : %s \n", ZSTD_getErrorName(ret)); } + POOL_joinJobs(pool); + POOL_free(pool); + if (state.pending) { + fprintf(stderr, "Unexpected leftover output blocks!\n"); + exit(13); } { unsigned char seekTableBuff[1024]; ZSTD_outBuffer out = {seekTableBuff, 1024, 0}; - while (ZSTD_seekable_writeSeekTable(fl, &out) != 0) { - fwrite_orDie(seekTableBuff, out.pos, fout); + while (ZSTD_seekable_writeSeekTable(state.frameLog, &out) != 0) { + fwrite_orDie(seekTableBuff, out.pos, state.fout); out.pos = 0; } - fwrite_orDie(seekTableBuff, out.pos, fout); + fwrite_orDie(seekTableBuff, out.pos, state.fout); } - ZSTD_seekable_freeFrameLog(fl); - free(jobs); - fclose_orDie(fout); + ZSTD_seekable_freeFrameLog(state.frameLog); + fclose_orDie(state.fout); fclose_orDie(fin); } -static const char* 
createOutFilename_orDie(const char* filename) -{ - size_t const inL = strlen(filename); - size_t const outL = inL + 5; - void* outSpace = malloc_orDie(outL); - memset(outSpace, 0, outL); - strcat(outSpace, filename); - strcat(outSpace, ".zst"); - return (const char*)outSpace; -} - int main(int argc, const char** argv) { const char* const exeName = argv[0]; if (argc!=4) { @@ -204,10 +245,9 @@ int main(int argc, const char** argv) { { const char* const inFileName = argv[1]; unsigned const frameSize = (unsigned)atoi(argv[2]); - int const nbThreads = atoi(argv[3]); + size_t const nbThreads = (size_t)atoi(argv[3]); - const char* const outFileName = createOutFilename_orDie(inFileName); - compressFile_orDie(inFileName, outFileName, 5, frameSize, nbThreads); + compressFile_orDie(inFileName, 5, frameSize, nbThreads); } return 0; diff --git a/contrib/seekable_format/examples/parallel_processing.c b/contrib/seekable_format/examples/parallel_processing.c index 36226b49fd3..2757a9aba07 100644 --- a/contrib/seekable_format/examples/parallel_processing.c +++ b/contrib/seekable_format/examples/parallel_processing.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,7 @@ #define ZSTD_STATIC_LINKING_ONLY #include // presumes zstd library is installed #include -#if defined(WIN32) || defined(_WIN32) +#if defined(_WIN32) # include # define SLEEP(x) Sleep(x) #else @@ -29,7 +29,7 @@ #include "pool.h" // use zstd thread pool for demo -#include "zstd_seekable.h" +#include "../zstd_seekable.h" #define MIN(a, b) ((a) < (b) ? (a) : (b)) @@ -100,13 +100,11 @@ struct sum_job { const char* fname; unsigned long long sum; unsigned frameNb; - int done; }; static void sumFrame(void* opaque) { struct sum_job* job = (struct sum_job*)opaque; - job->done = 0; FILE* const fin = fopen_orDie(job->fname, "rb"); @@ -128,7 +126,6 @@ static void sumFrame(void* opaque) sum += data[i]; } job->sum = sum; - job->done = 1; fclose(fin); ZSTD_seekable_free(seekable); @@ -153,14 +150,14 @@ static void sumFile_orDie(const char* fname, int nbThreads) unsigned fnb; for (fnb = 0; fnb < numFrames; fnb++) { - jobs[fnb] = (struct sum_job){ fname, 0, fnb, 0 }; + jobs[fnb] = (struct sum_job){ fname, 0, fnb }; POOL_add(pool, sumFrame, &jobs[fnb]); } + POOL_joinJobs(pool); unsigned long long total = 0; for (fnb = 0; fnb < numFrames; fnb++) { - while (!jobs[fnb].done) SLEEP(5); /* wake up every 5 milliseconds to check */ total += jobs[fnb].sum; } diff --git a/contrib/seekable_format/examples/seekable_compression.c b/contrib/seekable_format/examples/seekable_compression.c index 9a331a89531..c3d227dd05a 100644 --- a/contrib/seekable_format/examples/seekable_compression.c +++ b/contrib/seekable_format/examples/seekable_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -13,7 +13,7 @@ #define ZSTD_STATIC_LINKING_ONLY #include // presumes zstd library is installed -#include "zstd_seekable.h" +#include "../zstd_seekable.h" static void* malloc_orDie(size_t size) { @@ -112,20 +112,23 @@ static char* createOutFilename_orDie(const char* filename) return (char*)outSpace; } -int main(int argc, const char** argv) { +#define CLEVEL_DEFAULT 5 +int main(int argc, const char** argv) +{ const char* const exeName = argv[0]; - if (argc!=3) { - printf("wrong arguments\n"); - printf("usage:\n"); - printf("%s FILE FRAME_SIZE\n", exeName); + if (argc<3 || argc>4) { + printf("wrong arguments \n"); + printf("usage: \n"); + printf("%s FILE FRAME_SIZE [LEVEL] \n", exeName); return 1; } { const char* const inFileName = argv[1]; unsigned const frameSize = (unsigned)atoi(argv[2]); + int const cLevel = (argc==4) ? atoi(argv[3]) : CLEVEL_DEFAULT; char* const outFileName = createOutFilename_orDie(inFileName); - compressFile_orDie(inFileName, outFileName, 5, frameSize); + compressFile_orDie(inFileName, outFileName, cLevel, frameSize); free(outFileName); } diff --git a/contrib/seekable_format/examples/seekable_decompression.c b/contrib/seekable_format/examples/seekable_decompression.c index e9e2013331e..7edbca87d0e 100644 --- a/contrib/seekable_format/examples/seekable_decompression.c +++ b/contrib/seekable_format/examples/seekable_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,7 +16,7 @@ #include // presumes zstd library is installed #include -#include "zstd_seekable.h" +#include "../zstd_seekable.h" #define MIN(a, b) ((a) < (b) ? (a) : (b)) diff --git a/contrib/seekable_format/examples/seekable_decompression_mem.c b/contrib/seekable_format/examples/seekable_decompression_mem.c index e7b1c65059c..44a06fbbfb0 100644 --- a/contrib/seekable_format/examples/seekable_decompression_mem.c +++ b/contrib/seekable_format/examples/seekable_decompression_mem.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/contrib/seekable_format/tests/.gitignore b/contrib/seekable_format/tests/.gitignore index f831eaf37c4..b59d08b342d 100644 --- a/contrib/seekable_format/tests/.gitignore +++ b/contrib/seekable_format/tests/.gitignore @@ -1 +1,2 @@ seekable_tests +data.txt diff --git a/contrib/seekable_format/tests/Makefile b/contrib/seekable_format/tests/Makefile index d51deb3ea82..a23c0f43c35 100644 --- a/contrib/seekable_format/tests/Makefile +++ b/contrib/seekable_format/tests/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2017-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -24,7 +24,7 @@ SEEKABLE_OBJS = ../zstdseek_compress.c ../zstdseek_decompress.c $(ZSTDLIB) .PHONY: default clean test default: test -test: seekable_tests +test: seekable_tests parallel_compression_test ./seekable_tests $(ZSTDLIB): @@ -32,7 +32,27 @@ $(ZSTDLIB): seekable_tests : $(SEEKABLE_OBJS) +EXAMPLES_PATH = ../examples +PARALLEL_COMPRESSION = $(EXAMPLES_PATH)/parallel_compression + +DATAGEN_PATH = ../../../tests +DATAGEN = $(DATAGEN_PATH)/datagen + +$(PARALLEL_COMPRESSION): + $(MAKE) -C $(EXAMPLES_PATH) parallel_compression + +$(DATAGEN): + $(MAKE) -C $(DATAGEN_PATH) datagen + +data.txt: $(DATAGEN) + $(DATAGEN) -g100M > $@ + +parallel_compression_test: $(PARALLEL_COMPRESSION) data.txt + ulimit -Sv 102400; $(PARALLEL_COMPRESSION) data.txt 1048576 2 + +.PHONY: parallel_compression_test parallel_comp + clean: @$(RM) core *.o tmp* result* *.zst \ - seekable_tests + seekable_tests data.txt @echo Cleaning completed diff --git a/contrib/seekable_format/tests/seekable_tests.c b/contrib/seekable_format/tests/seekable_tests.c index a482638b918..809ea6425eb 100644 --- a/contrib/seekable_format/tests/seekable_tests.c +++ b/contrib/seekable_format/tests/seekable_tests.c @@ -3,8 +3,55 @@ #include // malloc #include #include +#include -#include "zstd_seekable.h" +#include "../zstd_seekable.h" + + +/* ZSTD_seekable_customFile implementation that reads/seeks a buffer while keeping track of total bytes read */ +typedef struct { + const void *ptr; + size_t size; + size_t pos; + size_t totalRead; +} buffWrapperWithTotal_t; + +static int readBuffWithTotal(void* opaque, void* buffer, size_t n) +{ + buffWrapperWithTotal_t* const buff = (buffWrapperWithTotal_t*)opaque; + assert(buff != NULL); + if (buff->pos + n > buff->size) return -1; + memcpy(buffer, (const char*)buff->ptr + buff->pos, n); + buff->pos += n; + buff->totalRead += n; + return 0; +} + +static int seekBuffWithTotal(void* opaque, long long offset, int origin) +{ + buffWrapperWithTotal_t* const buff = (buffWrapperWithTotal_t*) opaque; + unsigned long long newOffset; + assert(buff != NULL); + switch (origin) { + case SEEK_SET: + assert(offset >= 0); + newOffset = (unsigned long long)offset; + break; + case SEEK_CUR: + newOffset = (unsigned long long)((long long)buff->pos + offset); + break; + case SEEK_END: + newOffset = (unsigned long long)((long long)buff->size + offset); + break; + default: + assert(0); /* not possible */ + } + if (newOffset > buff->size) { + return -1; + } + buff->pos = newOffset; + return 0; +} /* Basic unit tests for zstd seekable format */ int main(int argc, const char** argv) @@ -186,6 +233,138 @@ int main(int argc, const char** argv) } printf("Success!\n"); + + printf("Test %u - check ZSTD magic in compressing empty string: ", testNb++); + { // compressing empty string should return a zstd header + size_t const capacity = 255; + char* inBuffer = malloc(capacity); + assert(inBuffer != NULL); + inBuffer[0] = '\0'; + void* const outBuffer = malloc(capacity); + assert(outBuffer != NULL); + + ZSTD_seekable_CStream *s = ZSTD_seekable_createCStream(); + ZSTD_seekable_initCStream(s, 1, 1, 255); + + ZSTD_inBuffer input = { .src=inBuffer, .pos=0, .size=0 }; + ZSTD_outBuffer output = { .dst=outBuffer, .pos=0, .size=capacity }; + + ZSTD_seekable_compressStream(s, &output, &input); + ZSTD_seekable_endStream(s, &output); + + if((((char*)output.dst)[0] != '\x28') | (((char*)output.dst)[1] != '\xb5') | (((char*)output.dst)[2] != '\x2f') | 
(((char*)output.dst)[3] != '\xfd')) { + printf("%#02x %#02x %#02x %#02x\n", ((char*)output.dst)[0], ((char*)output.dst)[1] , ((char*)output.dst)[2] , ((char*)output.dst)[3] ); + + free(inBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(s); + goto _test_error; + } + + free(inBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(s); + } + printf("Success!\n"); + + + printf("Test %u - multiple decompress calls: ", testNb++); + { char const inBuffer[] = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt"; + size_t const inSize = sizeof(inBuffer); + + size_t const seekCapacity = 5000; + void* const seekBuffer = malloc(seekCapacity); + assert(seekBuffer != NULL); + size_t seekSize; + + size_t const outCapacity = inSize; + char* const outBuffer = malloc(outCapacity); + assert(outBuffer != NULL); + + ZSTD_seekable_CStream* const zscs = ZSTD_seekable_createCStream(); + assert(zscs != NULL); + + /* compress test data with a small frame size to ensure multiple frames in the output */ + unsigned const maxFrameSize = 40; + { size_t const initStatus = ZSTD_seekable_initCStream(zscs, 9, 0 /* checksumFlag */, maxFrameSize); + assert(!ZSTD_isError(initStatus)); + } + + { ZSTD_outBuffer outb = { .dst=seekBuffer, .pos=0, .size=seekCapacity }; + ZSTD_inBuffer inb = { .src=inBuffer, .pos=0, .size=inSize }; + + while (inb.pos < inb.size) { + size_t const cStatus = ZSTD_seekable_compressStream(zscs, &outb, &inb); + assert(!ZSTD_isError(cStatus)); + } + + size_t const endStatus = ZSTD_seekable_endStream(zscs, &outb); + assert(!ZSTD_isError(endStatus)); + seekSize = outb.pos; + } + + ZSTD_seekable* const stream = ZSTD_seekable_create(); + assert(stream != NULL); + buffWrapperWithTotal_t buffWrapper = {seekBuffer, seekSize, 0, 0}; + { ZSTD_seekable_customFile srcFile = {&buffWrapper, &readBuffWithTotal, &seekBuffWithTotal}; + size_t const initStatus = ZSTD_seekable_initAdvanced(stream, srcFile); + assert(!ZSTD_isError(initStatus)); } + + /* Perform a series of small reads and seeks (repeatedly read 1 byte and skip 1 byte) + and check that we didn't reread input data unnecessarily */ + size_t pos; + for (pos = 0; pos < inSize; pos += 2) { + size_t const decStatus = ZSTD_seekable_decompress(stream, outBuffer, 1, pos); + if (decStatus != 1 || outBuffer[0] != inBuffer[pos]) { + free(seekBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(zscs); + ZSTD_seekable_free(stream); + goto _test_error; + } + } + if (buffWrapper.totalRead > seekSize) { + /* We read more than the compressed size, meaning there were some rereads. + This is unneeded because we only seeked forward. 
*/ + printf("Too much data read: %zu read, with compressed size %zu\n", buffWrapper.totalRead, seekSize); + free(seekBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(zscs); + ZSTD_seekable_free(stream); + goto _test_error; + } + + /* Perform some reads and seeks to ensure correctness */ + struct { + size_t offset; + size_t size; + } const tests[] = { /* Assume the frame size is 40 */ + {20, 40}, /* read partial data from two frames */ + {60, 10}, /* continue reading from the same offset */ + {50, 20}, /* seek backward within the same frame */ + {10, 10}, /* seek backward to a different frame */ + {25, 10}, /* seek forward within the same frame */ + {60, 10}, /* seek forward to a different frame */ + }; + size_t idx; + for (idx = 0; idx < sizeof(tests) / sizeof(tests[0]); idx++) { + size_t const decStatus = ZSTD_seekable_decompress(stream, outBuffer, tests[idx].size, tests[idx].offset); + if (decStatus != tests[idx].size || memcmp(outBuffer, inBuffer + tests[idx].offset, tests[idx].size) != 0) { + free(seekBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(zscs); + ZSTD_seekable_free(stream); + goto _test_error; + } + } + + free(seekBuffer); + free(outBuffer); + ZSTD_seekable_freeCStream(zscs); + ZSTD_seekable_free(stream); + } + printf("Success!\n"); + /* TODO: Add more tests */ printf("Finished tests\n"); return 0; diff --git a/contrib/seekable_format/zstd_seekable.h b/contrib/seekable_format/zstd_seekable.h index d2807cfbd6f..b1f83d0e0a1 100644 --- a/contrib/seekable_format/zstd_seekable.h +++ b/contrib/seekable_format/zstd_seekable.h @@ -1,13 +1,13 @@ #ifndef SEEKABLE_H #define SEEKABLE_H +#include +#include "zstd.h" /* ZSTDLIB_API */ + #if defined (__cplusplus) extern "C" { #endif -#include -#include "zstd.h" /* ZSTDLIB_API */ - #define ZSTD_seekTableFooterSize 9 @@ -15,8 +15,8 @@ extern "C" { #define ZSTD_SEEKABLE_MAXFRAMES 0x8000000U -/* Limit the maximum size to avoid any potential issues storing the compressed size */ -#define ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE 0x80000000U +/* Limit maximum size to avoid potential issues storing the compressed size */ +#define ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE 0x40000000U /*-**************************************************************************** * Seekable Format @@ -48,10 +48,19 @@ typedef struct ZSTD_seekTable_s ZSTD_seekTable; * * Use ZSTD_seekable_initCStream() to initialize a ZSTD_seekable_CStream object * for a new compression operation. -* `maxFrameSize` indicates the size at which to automatically start a new -* seekable frame. `maxFrameSize == 0` implies the default maximum size. -* `checksumFlag` indicates whether or not the seek table should include frame -* checksums on the uncompressed data for verification. +* - `maxFrameSize` indicates the size at which to automatically start a new +* seekable frame. +* `maxFrameSize == 0` implies the default maximum size. +* Smaller frame sizes allow faster decompression of small segments, +* since retrieving a single byte requires decompression of +* the full frame where the byte belongs. +* In general, size the frames to roughly correspond to +* the access granularity (when it's known). +* But small sizes also reduce compression ratio. +* Avoid really tiny frame sizes (< 1 KB), +* that would hurt compression ratio considerably. +* - `checksumFlag` indicates whether or not the seek table should include frame +* checksums on the uncompressed data for verification. 
* @return : a size hint for input to provide for compression, or an error code * checkable with ZSTD_isError() * diff --git a/contrib/seekable_format/zstd_seekable_compression_format.md b/contrib/seekable_format/zstd_seekable_compression_format.md index 55aebfd2e9d..7bd0790e81b 100644 --- a/contrib/seekable_format/zstd_seekable_compression_format.md +++ b/contrib/seekable_format/zstd_seekable_compression_format.md @@ -2,7 +2,7 @@ ### Notices -Copyright (c) 2017-present Facebook, Inc. +Copyright (c) Meta Platforms, Inc. and affiliates. Permission is granted to copy and distribute this document for any purpose and without charge, diff --git a/contrib/seekable_format/zstdseek_compress.c b/contrib/seekable_format/zstdseek_compress.c index 242bd2ac3a1..4997807d251 100644 --- a/contrib/seekable_format/zstdseek_compress.c +++ b/contrib/seekable_format/zstdseek_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -230,6 +230,8 @@ size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* const BYTE* const inBase = (const BYTE*) input->src + input->pos; size_t inLen = input->size - input->pos; + assert(zcs->maxFrameSize < INT_MAX); + ZSTD_CCtx_setParameter(zcs->cstream, ZSTD_c_srcSizeHint, (int)zcs->maxFrameSize); inLen = MIN(inLen, (size_t)(zcs->maxFrameSize - zcs->frameDSize)); /* if we haven't finished flushing the last frame, don't start writing a new one */ @@ -350,7 +352,7 @@ size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output) size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output) { - if (!zcs->writingSeekTable && zcs->frameDSize) { + if (!zcs->writingSeekTable) { const size_t endFrame = ZSTD_seekable_endFrame(zcs, output); if (ZSTD_isError(endFrame)) return endFrame; /* return an accurate size hint */ diff --git a/contrib/seekable_format/zstdseek_decompress.c b/contrib/seekable_format/zstdseek_decompress.c index 0848d2519f8..ab9088a1061 100644 --- a/contrib/seekable_format/zstdseek_decompress.c +++ b/contrib/seekable_format/zstdseek_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-present, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -38,7 +38,7 @@ * note: it's better to use unistd.h's _POSIX_VERSION whenever possible */ # define PLATFORM_POSIX_VERSION 200112L -/* try to determine posix version through official unistd.h's _POSIX_VERSION (http://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). +/* try to determine posix version through official unistd.h's _POSIX_VERSION (https://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). * note : there is no simple way to know in advance if <unistd.h> is present or not on target system, * Posix specification mandates its presence and its content, but target system must respect this spec. * It's necessary to _not_ #include <unistd.h> whenever target OS is not unix-like @@ -77,7 +77,10 @@ /* ************************************************************ * Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW ***************************************************************/ -#if defined(_MSC_VER) && _MSC_VER >= 1400 +#if defined(LIBC_NO_FSEEKO) +/* Some older libc implementations don't include these functions (e.g.
Bionic < 24) */ +# define LONG_SEEK fseek +#elif defined(_MSC_VER) && _MSC_VER >= 1400 # define LONG_SEEK _fseeki64 #elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ # define LONG_SEEK fseeko @@ -252,6 +255,8 @@ size_t ZSTD_seekable_free(ZSTD_seekable* zs) ZSTD_seekTable* ZSTD_seekTable_create_fromSeekable(const ZSTD_seekable* zs) { + assert(zs != NULL); + if (zs->seekTable.entries == NULL) return NULL; ZSTD_seekTable* const st = (ZSTD_seekTable*)malloc(sizeof(ZSTD_seekTable)); if (st==NULL) return NULL; @@ -493,7 +498,7 @@ size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsign size_t srcBytesRead = 0; do { /* check if we can continue from a previous decompress job */ - if (targetFrame != zs->curFrame || offset != zs->decompressedOffset) { + if (targetFrame != zs->curFrame || offset < zs->decompressedOffset) { zs->decompressedOffset = zs->seekTable.entries[targetFrame].dOffset; zs->curFrame = targetFrame; @@ -517,9 +522,9 @@ size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsign size_t forwardProgress; if (zs->decompressedOffset < offset) { /* dummy decompressions until we get to the target offset */ - outTmp = (ZSTD_outBuffer){zs->outBuff, MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset), 0}; + outTmp = (ZSTD_outBuffer){zs->outBuff, (size_t) (MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset)), 0}; } else { - outTmp = (ZSTD_outBuffer){dst, len, zs->decompressedOffset - offset}; + outTmp = (ZSTD_outBuffer){dst, len, (size_t) (zs->decompressedOffset - offset)}; } prevOutPos = outTmp.pos; diff --git a/contrib/seqBench/Makefile b/contrib/seqBench/Makefile index 0782961eb1d..e7f08a42cc8 100644 --- a/contrib/seqBench/Makefile +++ b/contrib/seqBench/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2018-present, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/doc/README.md b/doc/README.md index bb7a3e4902e..8f3babcdbb2 100644 --- a/doc/README.md +++ b/doc/README.md @@ -5,8 +5,9 @@ This directory contains material defining the Zstandard format, as well as detailed instructions to use `zstd` library. __`zstd_manual.html`__ : Documentation of `zstd.h` API, in html format. -Click on this link: [http://zstd.net/zstd_manual.html](http://zstd.net/zstd_manual.html) -to display documentation of latest release in readable format within a browser. +Unfortunately, Github doesn't display `html` files in parsed format, just as source code. +For a readable display of html API documentation of latest release, +use this link: [https://raw.githack.com/facebook/zstd/release/doc/zstd_manual.html](https://raw.githack.com/facebook/zstd/release/doc/zstd_manual.html) . __`zstd_compression_format.md`__ : This document defines the Zstandard compression format. Compliant decoders must adhere to this document, diff --git a/doc/decompressor_errata.md b/doc/decompressor_errata.md index b162e7fd6e7..b570f73145d 100644 --- a/doc/decompressor_errata.md +++ b/doc/decompressor_errata.md @@ -6,12 +6,53 @@ Each entry will contain: 1. The last affected decompressor versions. 2. The decompressor components affected. 2. Whether the compressed frame could ever be produced by the reference compressor. -3. An example frame. +3. 
An example frame (hexadecimal string when it can be short enough, link to golden file otherwise) 4. A description of the bug. The document is in reverse chronological order, with the bugs that affect the most recent zstd decompressor versions listed first. +No sequence using the 2-bytes format +------------------------------------------------ + +**Last affected version**: v1.5.5 + +**Affected decompressor component(s)**: Library & CLI + +**Produced by the reference compressor**: No + +**Example Frame**: see zstd/tests/golden-decompression/zeroSeq_2B.zst + +The zstd decoder incorrectly expects FSE tables when there are 0 sequences present in the block +if the value 0 is encoded using the 2-bytes format. +Instead, it should immediately end the sequence section, and move on to next block. + +This situation was never generated by the reference compressor, +because representing 0 sequences with the 2-bytes format is inefficient +(the 1-byte format is always used in this case). + + +Compressed block with a size of exactly 128 KB +------------------------------------------------ + +**Last affected version**: v1.5.2 + +**Affected decompressor component(s)**: Library & CLI + +**Produced by the reference compressor**: No + +**Example Frame**: see zstd/tests/golden-decompression/block-128k.zst + +The zstd decoder incorrectly rejected blocks of type `Compressed_Block` when their size was exactly 128 KB. +Note that `128 KB - 1` was accepted, and `128 KB + 1` is forbidden by the spec. + +This type of block was never generated by the reference compressor. + +These blocks used to be disallowed by the spec up until spec version 0.3.2 when the restriction was lifted by [PR#1689](https://github.com/facebook/zstd/pull/1689). + +> A Compressed_Block has the extra restriction that Block_Size is always strictly less than the decompressed size. If this condition cannot be respected, the block must be sent uncompressed instead (Raw_Block). + + Compressed block with 0 literals and 0 sequences ------------------------------------------------ @@ -31,6 +72,7 @@ Additionally, these blocks were disallowed by the spec up until spec version 0.3 > A Compressed_Block has the extra restriction that Block_Size is always strictly less than the decompressed size. If this condition cannot be respected, the block must be sent uncompressed instead (Raw_Block). + First block is RLE block ------------------------ @@ -52,6 +94,7 @@ block. https://github.com/facebook/zstd/blob/8814aa5bfa74f05a86e55e9d508da177a893ceeb/lib/compress/zstd_compress.c#L3527-L3535 + Tiny FSE Table & Block ---------------------- @@ -82,3 +125,24 @@ The total `Block_Content` is `5` bytes, and `Last_Table_Offset` is `2`. See the compressor workaround code: https://github.com/facebook/zstd/blob/8814aa5bfa74f05a86e55e9d508da177a893ceeb/lib/compress/zstd_compress.c#L2667-L2682 + +Magicless format +---------------------- + +**Last affected version**: v1.5.5 + +**Affected decompressor component(s)**: Library + +**Produced by the reference compressor**: Yes (example: https://gist.github.com/embg/9940726094f4cf2cef162cffe9319232) + +**Example Frame**: `27 b5 2f fd 00 03 19 00 00 66 6f 6f 3f ba c4 59` + +v1.5.6 fixes several bugs in which the magicless-format decoder rejects valid frames. 
+These include but are not limited to: +* Valid frames that happen to begin with a legacy magic number (little-endian) +* Valid frames that happen to begin with a skippable magic number (little-endian) + +If you are affected by this issue and cannot update to v1.5.6 or later, there is a +workaround to recover affected data. Simply prepend the ZSTD magic number +`0xFD2FB528` (little-endian) to your data and decompress using the standard-format +decoder. diff --git a/doc/decompressor_permissive.md b/doc/decompressor_permissive.md new file mode 100644 index 00000000000..164d6c86d61 --- /dev/null +++ b/doc/decompressor_permissive.md @@ -0,0 +1,80 @@ +Decompressor Permissiveness to Invalid Data +=========================================== + +This document describes the behavior of the reference decompressor in cases +where it accepts formally invalid data instead of reporting an error. + +While the reference decompressor *must* decode any compliant frame following +the specification, its ability to detect erroneous data is on a best effort +basis: the decoder may accept input data that would be formally invalid, +when it causes no risk to the decoder, and which detection would cost too much +complexity or speed regression. + +In practice, the vast majority of invalid data are detected, if only because +many corruption events are dangerous for the decoder process (such as +requesting an out-of-bound memory access) and many more are easy to check. + +This document lists a few known cases where invalid data was formerly accepted +by the decoder, and what has changed since. + + +Truncated Huffman states +------------------------ + +**Last affected version**: v1.5.6 + +**Produced by the reference compressor**: No + +**Example Frame**: `28b5 2ffd 0000 5500 0072 8001 0420 7e1f 02aa 00` + +When using FSE-compressed Huffman weights, the compressed weight bitstream +could contain fewer bits than necessary to decode the initial states. + +The reference decompressor up to v1.5.6 will decode truncated or missing +initial states as zero, which can result in a valid Huffman tree if only +the second state is truncated. + +In newer versions, truncated initial states are reported as a corruption +error by the decoder. + + +Offset == 0 +----------- + +**Last affected version**: v1.5.5 + +**Produced by the reference compressor**: No + +**Example Frame**: `28b5 2ffd 0000 4500 0008 0002 002f 430b ae` + +If a sequence is decoded with `literals_length = 0` and `offset_value = 3` +while `Repeated_Offset_1 = 1`, the computed offset will be `0`, which is +invalid. + +The reference decompressor up to v1.5.5 processes this case as if the computed +offset was `1`, including inserting `1` into the repeated offset list. +This prevents the output buffer from remaining uninitialized, thus denying a +potential attack vector from an untrusted source. +However, in the rare case where this scenario would be the outcome of a +transmission or storage error, the decoder relies on the checksum to detect +the error. + +In newer versions, this case is always detected and reported as a corruption error. + + +Non-zeroes reserved bits +------------------------ + +**Last affected version**: v1.5.5 + +**Produced by the reference compressor**: No + +The Sequences section of each block has a header, and one of its elements is a +byte, which describes the compression mode of each symbol. +This byte contains 2 reserved bits which must be set to zero. + +The reference decompressor up to v1.5.5 just ignores these 2 bits. 
+This behavior has no consequence for the rest of the frame decoding process. + +In newer versions, the 2 reserved bits are actively checked for value zero, +and the decoder reports a corruption error if they are not. diff --git a/doc/educational_decoder/Makefile b/doc/educational_decoder/Makefile index a9c601ebca7..b3154d9afc0 100644 --- a/doc/educational_decoder/Makefile +++ b/doc/educational_decoder/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -10,7 +10,7 @@ ZSTD ?= zstd # note: requires zstd installation on local system -UNAME?= $(shell uname) +UNAME?= $(shell sh -c 'MSYSTEM="MSYS" uname') ifeq ($(UNAME), SunOS) DIFF ?= gdiff else diff --git a/doc/educational_decoder/harness.c b/doc/educational_decoder/harness.c index 935f60da87e..12c5a801beb 100644 --- a/doc/educational_decoder/harness.c +++ b/doc/educational_decoder/harness.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/doc/educational_decoder/zstd_decompress.c b/doc/educational_decoder/zstd_decompress.c index 93640708663..839e085b481 100644 --- a/doc/educational_decoder/zstd_decompress.c +++ b/doc/educational_decoder/zstd_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -997,7 +997,8 @@ static void decompress_sequences(frame_context_t *const ctx, const size_t num_sequences); static sequence_command_t decode_sequence(sequence_states_t *const state, const u8 *const src, - i64 *const offset); + i64 *const offset, + int lastSequence); static void decode_seq_table(FSE_dtable *const table, istream_t *const in, const seq_part_t type, const seq_mode_t mode); @@ -1017,12 +1018,7 @@ static size_t decode_sequences(frame_context_t *const ctx, istream_t *in, // This is a variable size field using between 1 and 3 bytes. Let's call its // first byte byte0." u8 header = IO_read_bits(in, 8); - if (header == 0) { - // "There are no sequences. The sequence section stops there. - // Regenerated content is defined entirely by literals section." - *sequences = NULL; - return 0; - } else if (header < 128) { + if (header < 128) { // "Number_of_Sequences = byte0 . Uses 1 byte." num_sequences = header; } else if (header < 255) { @@ -1033,6 +1029,12 @@ static size_t decode_sequences(frame_context_t *const ctx, istream_t *in, num_sequences = IO_read_bits(in, 16) + 0x7F00; } + if (num_sequences == 0) { + // "There are no sequences. The sequence section stops there." 
+ *sequences = NULL; + return 0; + } + *sequences = malloc(num_sequences * sizeof(sequence_command_t)); if (!*sequences) { BAD_ALLOC(); @@ -1114,7 +1116,7 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in, for (size_t i = 0; i < num_sequences; i++) { // Decode sequences one by one - sequences[i] = decode_sequence(&states, src, &bit_offset); + sequences[i] = decode_sequence(&states, src, &bit_offset, i==num_sequences-1); } if (bit_offset != 0) { @@ -1125,7 +1127,8 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in, // Decode a single sequence and update the state static sequence_command_t decode_sequence(sequence_states_t *const states, const u8 *const src, - i64 *const offset) { + i64 *const offset, + int lastSequence) { // "Each symbol is a code in its own context, which specifies Baseline and // Number_of_Bits to add. Codes are FSE compressed, and interleaved with raw // additional bits in the same bitstream." @@ -1160,7 +1163,7 @@ static sequence_command_t decode_sequence(sequence_states_t *const states, // Literals_Length_State is updated, followed by Match_Length_State, and // then Offset_State." // If the stream is complete don't read bits to update state - if (*offset != 0) { + if (!lastSequence) { FSE_update_state(&states->ll_table, &states->ll_state, src, offset); FSE_update_state(&states->ml_table, &states->ml_state, src, offset); FSE_update_state(&states->of_table, &states->of_state, src, offset); @@ -1210,7 +1213,7 @@ static void decode_seq_table(FSE_dtable *const table, istream_t *const in, break; } case seq_repeat: - // "Repeat_Mode : re-use distribution table from previous compressed + // "Repeat_Mode : reuse distribution table from previous compressed // block." // Nothing to do here, table will be unchanged if (!table->symbols) { @@ -1399,7 +1402,7 @@ size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) { /******* END OUTPUT SIZE COUNTING *********************************************/ /******* DICTIONARY PARSING ***************************************************/ -dictionary_t* create_dictionary() { +dictionary_t* create_dictionary(void) { dictionary_t* const dict = calloc(1, sizeof(dictionary_t)); if (!dict) { BAD_ALLOC(); @@ -1890,7 +1893,7 @@ static size_t HUF_decompress_4stream(const HUF_dtable *const dtable, /// Initializes a Huffman table using canonical Huffman codes /// For more explanation on canonical Huffman codes see -/// http://www.cs.uofs.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html +/// https://www.cs.scranton.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html /// Codes within a level are allocated in symbol order (i.e. smaller symbols get /// earlier codes) static void HUF_init_dtable(HUF_dtable *const table, const u8 *const bits, diff --git a/doc/educational_decoder/zstd_decompress.h b/doc/educational_decoder/zstd_decompress.h index d89c8352324..c13c8134dee 100644 --- a/doc/educational_decoder/zstd_decompress.h +++ b/doc/educational_decoder/zstd_decompress.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/doc/zstd_compression_format.md b/doc/zstd_compression_format.md index 419de38eb81..7700c472dfa 100644 --- a/doc/zstd_compression_format.md +++ b/doc/zstd_compression_format.md @@ -3,7 +3,7 @@ Zstandard Compression Format ### Notices -Copyright (c) 2016-2021 Yann Collet, Facebook, Inc. 
+Copyright (c) Meta Platforms, Inc. and affiliates. Permission is granted to copy and distribute this document for any purpose and without charge, @@ -16,7 +16,7 @@ Distribution of this document is unlimited. ### Version -0.3.7 (2020-12-09) +0.4.4 (2025-03-22) Introduction @@ -26,7 +26,7 @@ The purpose of this document is to define a lossless compressed data format, that is independent of CPU type, operating system, file system and character set, suitable for file compression, pipe and streaming compression, -using the [Zstandard algorithm](http://www.zstandard.org). +using the [Zstandard algorithm](https://facebook.github.io/zstd/). The text of the specification assumes a basic background in programming at the level of bits and other primitive data representations. @@ -35,7 +35,7 @@ even for an arbitrarily long sequentially presented input data stream, using only an a priori bounded amount of intermediate storage, and hence can be used in data communications. The format uses the Zstandard compression method, -and optional [xxHash-64 checksum method](http://www.xxhash.org), +and optional [xxHash-64 checksum method](https://cyan4973.github.io/xxHash/), for detection of data corruption. The data format defined by this specification @@ -134,7 +134,7 @@ __`Content_Checksum`__ An optional 32-bit checksum, only present if `Content_Checksum_flag` is set. The content checksum is the result -of [xxh64() hash function](http://www.xxhash.org) +of [xxh64() hash function](https://cyan4973.github.io/xxHash/) digesting the original (decoded) data as input, and a seed of zero. The low 4 bytes of the checksum are stored in __little-endian__ format. @@ -390,7 +390,7 @@ __`Block_Content`__ and __`Block_Maximum_Size`__ The size of `Block_Content` is limited by `Block_Maximum_Size`, which is the smallest of: - `Window_Size` -- 128 KB +- 128 KiB (131.072 bytes) `Block_Maximum_Size` is constant for a given frame. This maximum is applicable to both the decompressed size @@ -435,7 +435,7 @@ They can be decoded first, and then copied during [Sequence Execution], or they can be decoded on the flow during [Sequence Execution]. Literals can be stored uncompressed or compressed using Huffman prefix codes. -When compressed, an optional tree description can be present, +When compressed, a tree description may optionally be present, followed by 1 or 4 streams. | `Literals_Section_Header` | [`Huffman_Tree_Description`] | [jumpTable] | Stream1 | [Stream2] | [Stream3] | [Stream4] | @@ -470,6 +470,7 @@ This field uses 2 lowest bits of first byte, describing 4 different block types repeated `Regenerated_Size` times. - `Compressed_Literals_Block` - This is a standard Huffman-compressed block, starting with a Huffman tree description. + In this mode, there are at least 2 different literals represented in the Huffman tree description. See details below. - `Treeless_Literals_Block` - This is a Huffman-compressed block, using Huffman tree _from previous Huffman-compressed literals block_. @@ -510,7 +511,7 @@ Its value is : `Size_Format = (Literals_Section_Header[0]>>2) & 3` `Regenerated_Size = (Literals_Section_Header[0]>>4) + (Literals_Section_Header[1]<<4) + (Literals_Section_Header[2]<<12)` Only Stream1 is present for these cases. -Note : it's allowed to represent a short value (for example `13`) +Note : it's allowed to represent a short value (for example `27`) using a long format, even if it's less efficient. 
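A hedged decoder-side sketch of the `Literals_Section_Header` parsing just described for `Raw_Literals_Block` and `RLE_Literals_Block` (illustrative helper, not part of the patch); the 1- and 2-byte variants follow the same layout, with `Size_Format` 00/10 spending only 1 bit and 5 size bits:

```c
#include <stddef.h>

typedef struct {
    int    block_type;        /* Literals_Block_Type */
    size_t regenerated_size;  /* decompressed size of the literals */
    size_t header_size;       /* bytes consumed by the header */
} lit_header;

/* `h` points to the first byte of the Literals Section. */
static lit_header decode_raw_rle_literals_header(const unsigned char* h)
{
    lit_header r;
    r.block_type = h[0] & 3;            /* 2 lowest bits of the first byte */
    switch ((h[0] >> 2) & 3) {          /* Size_Format */
    case 0: case 2:                     /* 00 or 10 : Size_Format uses 1 bit */
        r.regenerated_size = h[0] >> 3;                                    /* 5 bits */
        r.header_size = 1;
        break;
    case 1:                             /* 01 : 12 bits, 2-byte header */
        r.regenerated_size = (h[0] >> 4) + ((size_t)h[1] << 4);
        r.header_size = 2;
        break;
    default:                            /* 11 : 20 bits, 3-byte header */
        r.regenerated_size = (h[0] >> 4) + ((size_t)h[1] << 4) + ((size_t)h[2] << 12);
        r.header_size = 3;
        break;
    }
    return r;
}
```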
__`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ : @@ -521,18 +522,37 @@ __`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). `Literals_Section_Header` uses 3 bytes. - `Size_Format` == 01 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). + Both `Regenerated_Size` and `Compressed_Size` use 10 bits (6-1023). `Literals_Section_Header` uses 3 bytes. - `Size_Format` == 10 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 14 bits (0-16383). + Both `Regenerated_Size` and `Compressed_Size` use 14 bits (6-16383). `Literals_Section_Header` uses 4 bytes. - `Size_Format` == 11 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 18 bits (0-262143). + Both `Regenerated_Size` and `Compressed_Size` use 18 bits (6-262143). `Literals_Section_Header` uses 5 bytes. Both `Compressed_Size` and `Regenerated_Size` fields follow __little-endian__ convention. Note: `Compressed_Size` __includes__ the size of the Huffman Tree description _when_ it is present. +Note 2: `Compressed_Size` can never be `==0`. +Even in single-stream scenario, assuming an empty content, it must be `>=1`, +since it contains at least the final end bit flag. +In 4-streams scenario, a valid `Compressed_Size` is necessarily `>= 10` +(6 bytes for the jump table, + 4x1 bytes for the 4 streams). + +4 streams is faster than 1 stream in decompression speed, +by exploiting instruction level parallelism. +But it's also more expensive, +costing on average ~7.3 bytes more than the 1 stream mode, mostly from the jump table. + +In general, use the 4 streams mode when there are more literals to decode, +to favor higher decompression speeds. +Note that beyond >1KB of literals, the 4 streams mode is compulsory. + +Note that a minimum of 6 bytes is required for the 4 streams mode. +That's a technical minimum, but it's not recommended to employ the 4 streams mode +for such a small quantity, that would be wasteful. +A more practical lower bound would be around ~256 bytes. #### Raw Literals Block The data in Stream1 is `Regenerated_Size` bytes long, @@ -552,6 +572,7 @@ or from a dictionary. ### `Huffman_Tree_Description` This section is only present when `Literals_Block_Type` type is `Compressed_Literals_Block` (`2`). +The tree describes the weights of all literals symbols that can be present in the literals block, at least 2 and up to 256. The format of the Huffman tree description can be found at [Huffman Tree description](#huffman-tree-description). The size of `Huffman_Tree_Description` is determined during decoding process, it must be used to determine where streams begin. @@ -561,10 +582,10 @@ it must be used to determine where streams begin. ### Jump Table The Jump Table is only present when there are 4 Huffman-coded streams. -Reminder : Huffman compressed data consists of either 1 or 4 Huffman-coded streams. +Reminder : Huffman compressed data consists of either 1 or 4 streams. If only one stream is present, it is a single bitstream occupying the entire -remaining portion of the literals block, encoded as described within +remaining portion of the literals block, encoded as described in [Huffman-Coded Streams](#huffman-coded-streams). If there are four streams, `Literals_Section_Header` only provided @@ -575,17 +596,18 @@ except for the last stream which may be up to 3 bytes smaller, to reach a total decompressed size as specified in `Regenerated_Size`. 
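To make the 4-streams bookkeeping concrete, here is a small sketch (hypothetical helper, not the reference implementation) combining the decompressed-size split described above with the `Stream4_Size` formula from the Jump Table paragraph that follows; it assumes `Regenerated_Size >= 6` has already been validated:

```c
#include <stddef.h>

/* Compressed sizes come from the Jump Table (three little-endian 16-bit
 * fields, detailed just below); decompressed sizes follow the
 * ceil(Regenerated_Size/4) split: streams 1-3 regenerate (Regenerated_Size+3)/4
 * bytes each, stream 4 regenerates the remainder. Returns -1 on corruption. */
static int four_stream_sizes(const unsigned char* jumpTable,
                             size_t totalStreamsSize, size_t regeneratedSize,
                             size_t cSize[4], size_t dSize[4])
{
    size_t const d123 = (regeneratedSize + 3) / 4;
    cSize[0] = jumpTable[0] + ((size_t)jumpTable[1] << 8);
    cSize[1] = jumpTable[2] + ((size_t)jumpTable[3] << 8);
    cSize[2] = jumpTable[4] + ((size_t)jumpTable[5] << 8);
    /* Stream4_Size = Total_Streams_Size - 6 - Stream1_Size - Stream2_Size - Stream3_Size,
     * and it must be >= 1, otherwise data is considered corrupted */
    if (totalStreamsSize < cSize[0] + cSize[1] + cSize[2] + 6 + 1) return -1;
    cSize[3] = totalStreamsSize - 6 - cSize[0] - cSize[1] - cSize[2];
    dSize[0] = dSize[1] = dSize[2] = d123;
    dSize[3] = regeneratedSize - 3 * d123;   /* may be up to 3 bytes smaller */
    return 0;
}
```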
The compressed size of each stream is provided explicitly in the Jump Table. -Jump Table is 6 bytes long, and consist of three 2-byte __little-endian__ fields, +Jump Table is 6 bytes long, and consists of three 2-byte __little-endian__ fields, describing the compressed sizes of the first three streams. -`Stream4_Size` is computed from total `Total_Streams_Size` minus sizes of other streams. +`Stream4_Size` is computed from `Total_Streams_Size` minus sizes of other streams: `Stream4_Size = Total_Streams_Size - 6 - Stream1_Size - Stream2_Size - Stream3_Size`. -Note: if `Stream1_Size + Stream2_Size + Stream3_Size > Total_Streams_Size`, +`Stream4_Size` is necessarily `>= 1`. Therefore, +if `Total_Streams_Size < Stream1_Size + Stream2_Size + Stream3_Size + 6 + 1`, data is considered corrupted. Each of these 4 bitstreams is then decoded independently as a Huffman-Coded stream, -as described at [Huffman-Coded Streams](#huffman-coded-streams) +as described in [Huffman-Coded Streams](#huffman-coded-streams) Sequences Section @@ -628,13 +650,15 @@ __`Number_of_Sequences`__ This is a variable size field using between 1 and 3 bytes. Let's call its first byte `byte0`. -- `if (byte0 == 0)` : there are no sequences. - The sequence section stops there. - Decompressed content is defined entirely as Literals Section content. - The FSE tables used in `Repeat_Mode` aren't updated. - `if (byte0 < 128)` : `Number_of_Sequences = byte0` . Uses 1 byte. -- `if (byte0 < 255)` : `Number_of_Sequences = ((byte0-128) << 8) + byte1` . Uses 2 bytes. -- `if (byte0 == 255)`: `Number_of_Sequences = byte1 + (byte2<<8) + 0x7F00` . Uses 3 bytes. +- `if (byte0 < 255)` : `Number_of_Sequences = ((byte0 - 0x80) << 8) + byte1`. Uses 2 bytes. + Note that the 2 bytes format fully overlaps the 1 byte format. +- `if (byte0 == 255)`: `Number_of_Sequences = byte1 + (byte2<<8) + 0x7F00`. Uses 3 bytes. + +`if (Number_of_Sequences == 0)` : there are no sequences. + The sequence section stops immediately, + FSE tables used in `Repeat_Mode` aren't updated. + Block's decompressed content is defined solely by the Literals Section content. __Symbol compression modes__ @@ -905,7 +929,10 @@ There is an exception though, when current sequence's `literals_length = 0`. In this case, repeated offsets are shifted by one, so an `offset_value` of 1 means `Repeated_Offset2`, an `offset_value` of 2 means `Repeated_Offset3`, -and an `offset_value` of 3 means `Repeated_Offset1 - 1_byte`. +and an `offset_value` of 3 means `Repeated_Offset1 - 1`. + +In the final case, if `Repeated_Offset1 - 1` evaluates to 0, then the +data is considered corrupted. For the first block, the starting offset history is populated with following values : `Repeated_Offset1`=1, `Repeated_Offset2`=4, `Repeated_Offset3`=8, @@ -967,7 +994,7 @@ into a flow of concatenated frames. Skippable frames defined in this specification are compatible with [LZ4] ones. -[LZ4]:http://www.lz4.org +[LZ4]:https://lz4.github.io/lz4/ From a compliant decoder perspective, skippable frames need just be skipped, and their content ignored, resuming decoding after the skippable frame. @@ -1011,55 +1038,57 @@ and to compress Huffman headers. FSE --- FSE, short for Finite State Entropy, is an entropy codec based on [ANS]. -FSE encoding/decoding involves a state that is carried over between symbols, -so decoding must be done in the opposite direction as encoding. +FSE encoding/decoding involves a state that is carried over between symbols. +Decoding must be done in the opposite direction as encoding. 
Therefore, all FSE bitstreams are read from end to beginning. Note that the order of the bits in the stream is not reversed, -we just read the elements in the reverse order they are written. +we just read each multi-bits element in the reverse order they are encoded. For additional details on FSE, see [Finite State Entropy]. [Finite State Entropy]:https://github.com/Cyan4973/FiniteStateEntropy/ -FSE decoding involves a decoding table which has a power of 2 size, and contain three elements: +FSE decoding is directed by a decoding table with a power of 2 size, each row containing three elements: `Symbol`, `Num_Bits`, and `Baseline`. The `log2` of the table size is its `Accuracy_Log`. An FSE state value represents an index in this table. To obtain the initial state value, consume `Accuracy_Log` bits from the stream as a __little-endian__ value. -The next symbol in the stream is the `Symbol` indicated in the table for that state. +The first symbol in the stream is the `Symbol` indicated in the table for that state. To obtain the next state value, the decoder should consume `Num_Bits` bits from the stream as a __little-endian__ value and add it to `Baseline`. [ANS]: https://en.wikipedia.org/wiki/Asymmetric_Numeral_Systems ### FSE Table Description -To decode FSE streams, it is necessary to construct the decoding table. -The Zstandard format encodes FSE table descriptions as follows: - -An FSE distribution table describes the probabilities of all symbols -from `0` to the last present one (included) -on a normalized scale of `1 << Accuracy_Log` . -Note that there must be two or more symbols with nonzero probability. +To decode an FSE bitstream, it is necessary to build its FSE decoding table. +The decoding table is derived from a distribution of Probabilities. +The Zstandard format encodes distributions of Probabilities as follows: -It's a bitstream which is read forward, in __little-endian__ fashion. -It's not necessary to know bitstream exact size, -it will be discovered and reported by the decoding process. +The distribution of probabilities is described in a bitstream which is read forward, +in __little-endian__ fashion. +The amount of bytes consumed from the bitstream to describe the distribution +is discovered at the end of the decoding process. -The bitstream starts by reporting on which scale it operates. +The bitstream starts by reporting on which scale the distribution operates. Let's `low4Bits` designate the lowest 4 bits of the first byte : `Accuracy_Log = low4bits + 5`. -Then follows each symbol value, from `0` to last present one. -The number of bits used by each field is variable. +An FSE distribution table describes the probabilities of all symbols +from `0` to the last present one (included) in natural order. +The sum of probabilities is normalized to reach a power of 2 total of `1 << Accuracy_Log` . +There must be two or more symbols with non-zero probabilities. + +The number of bits used to decode each probability is variable. It depends on : - Remaining probabilities + 1 : __example__ : Presuming an `Accuracy_Log` of 8, - and presuming 100 probabilities points have already been distributed, + and presuming 100 probability points have already been distributed, the decoder may read any value from `0` to `256 - 100 + 1 == 157` (inclusive). - Therefore, it must read `log2sup(157) == 8` bits. + Therefore, it may read up to `log2sup(157) == 8` bits, where `log2sup(N)` + is the smallest integer `T` that satisfies `(1 << T) > N`. 
- Value decoded : small values use 1 less bit :
  __example__ :
@@ -1070,111 +1099,133 @@ It depends on :
   values from 98 to 157 use 8 bits.
   This is achieved through this scheme :
 
-  | Value read | Value decoded | Number of bits used |
-  | ---------- | ------------- | ------------------- |
-  | 0 - 97     | 0 - 97        | 7                   |
-  | 98 - 127   | 98 - 127      | 8                   |
-  | 128 - 225  | 0 - 97        | 7                   |
-  | 226 - 255  | 128 - 157     | 8                   |
+  | 8-bit field read | Value decoded | Nb of bits consumed |
+  | ---------------- | ------------- | ------------------- |
+  | 0 - 97           | 0 - 97        | 7                   |
+  | 98 - 127         | 98 - 127      | 8                   |
+  | 128 - 225        | 0 - 97        | 7                   |
+  | 226 - 255        | 128 - 157     | 8                   |
 
-Symbols probabilities are read one by one, in order.
+Probability is derived from Value decoded using the following formula:
+`Probability = Value - 1`
 
-Probability is obtained from Value decoded by following formula :
-`Proba = value - 1`
+Consequently, a Probability of `0` is described by a Value `1`.
 
-It means value `0` becomes negative probability `-1`.
-`-1` is a special probability, which means "less than 1".
-Its effect on distribution table is described in the [next section].
-For the purpose of calculating total allocated probability points, it counts as one.
+A Value `0` is used to signal a special case, named "Probability `-1`".
+It describes a probability which should have been "less than 1".
+Its effect on the decoding table building process is described in the [next section].
+For the purpose of counting total allocated probability points, it counts as one.
 
 [next section]:#from-normalized-distribution-to-decoding-tables
 
-When a symbol has a __probability__ of `zero`,
+Symbols probabilities are read one by one, in order.
+After each probability is decoded, the total nb of probability points is updated.
+This is used to determine how many bits must be read to decode the probability of next symbol.
+
+When a symbol has a __probability__ of `zero` (decoded from reading a Value `1`),
 it is followed by a 2-bits repeat flag.
 This repeat flag tells how many probabilities of zeroes follow the current one.
 It provides a number ranging from 0 to 3.
 If it is a 3, another 2-bits repeat flag follows, and so on.
 
-When last symbol reaches cumulated total of `1 << Accuracy_Log`,
-decoding is complete.
-If the last symbol makes cumulated total go above `1 << Accuracy_Log`,
-distribution is considered corrupted.
+When the Probability for a symbol makes cumulated total reach `1 << Accuracy_Log`,
+then it's the last symbol, and decoding is complete.
 
 Then the decoder can tell how many bytes were used in this process,
 and how many symbols are present.
 The bitstream consumes a round number of bytes.
 Any remaining bit within the last byte is just unused.
 
+If this process results in a non-zero probability for a symbol outside of the
+valid range of symbols that the FSE table is defined for, even if that symbol is
+not used, then the data is considered corrupted.
+For the specific case of offset codes,
+a decoder implementation may reject a frame containing a non-zero probability
+for an offset code larger than the largest offset code supported by the decoder
+implementation.
+
 #### From normalized distribution to decoding tables
 
-The distribution of normalized probabilities is enough
+The normalized distribution of probabilities is enough
 to create a unique decoding table.
-
-It follows the following build rule :
+It is generated using the following build rule :
 
 The table has a size of `Table_Size = 1 << Accuracy_Log`.
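A sketch of what one row of such a table might look like (names are illustrative, not the reference implementation's):

```c
/* One row of the FSE decoding table described here:
 * the current state value indexes the table, yielding the decoded symbol
 * and the (Number_of_Bits, Baseline) pair used to compute the next state. */
typedef struct {
    unsigned char  symbol;          /* decoded symbol for this state */
    unsigned char  number_of_bits;  /* bits read from the stream for the next state */
    unsigned short baseline;        /* next state = baseline + value of the bits read */
} FSE_decoding_row;

/* The full table holds (1 << Accuracy_Log) such rows. */
```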
-Each cell describes the symbol decoded, -and instructions to get the next state (`Number_of_Bits` and `Baseline`). +Each row specifies the decoded symbol, +and instructions to reach the next state (`Number_of_Bits` and `Baseline`). -Symbols are scanned in their natural order for "less than 1" probabilities. -Symbols with this probability are being attributed a single cell, +Symbols are first scanned in their natural order for "less than 1" probabilities +(previously decoded from a Value of `0`). +Symbols with this special probability are being attributed a single row, starting from the end of the table and retreating. These symbols define a full state reset, reading `Accuracy_Log` bits. -Then, all remaining symbols, sorted in natural order, are allocated cells. -Starting from symbol `0` (if it exists), and table position `0`, -each symbol gets allocated as many cells as its probability. -Cell allocation is spread, not linear : -each successor position follows this rule : +Then, all remaining symbols, sorted in natural order, are allocated rows. +Starting from smallest present symbol, and table position `0`, +each symbol gets allocated as many rows as its probability. +Row allocation is not linear, it follows this order, in modular arithmetic: ``` position += (tableSize>>1) + (tableSize>>3) + 3; position &= tableSize-1; ``` -A position is skipped if already occupied by a "less than 1" probability symbol. -`position` does not reset between symbols, it simply iterates through -each position in the table, switching to the next symbol when enough -states have been allocated to the current one. +Using above ordering rule, each symbol gets allocated as many rows as its probability. +If a position is already occupied by a "less than 1" probability symbol, +it is simply skipped, and the next position is allocated instead. +Once enough rows have been allocated for the current symbol, +the allocation process continues, using the next symbol, in natural order. +This process guarantees that the table is entirely and exactly filled. -The process guarantees that the table is entirely filled. -Each cell corresponds to a state value, which contains the symbol being decoded. +Each row specifies a decoded symbol, and is accessed by current state value. +It also specifies `Number_of_Bits` and `Baseline`, which are required to determine next state value. -To add the `Number_of_Bits` and `Baseline` required to retrieve next state, -it's first necessary to sort all occurrences of each symbol in state order. -Lower states will need 1 more bit than higher ones. -The process is repeated for each symbol. +To correctly set these fields, it's necessary to sort all occurrences of each symbol in state value order, +and then attribute N+1 bits to lower rows, and N bits to higher rows, +following the process described below (using an example): __Example__ : -Presuming a symbol has a probability of 5, -it receives 5 cells, corresponding to 5 state values. -These state values are then sorted in natural order. +Presuming an `Accuracy_Log` of 7, +let's imagine a symbol with a Probability of 5: +it receives 5 rows, corresponding to 5 state values between `0` and `127`. + +In this example, the first state value happens to be `1` (after unspecified previous symbols). +The next 4 states are then determined using above modular arithmetic rule, +which specifies to add `64+16+3 = 83` modulo `128` to jump to next position, +producing the following series: `1`, `84`, `39`, `122`, `77` (modular arithmetic). 
+(note: the next symbol will then start at `32`). + +These state values are then sorted in natural order, +resulting in the following series: `1`, `39`, `77`, `84`, `122`. -Next power of 2 after 5 is 8. -Space of probabilities must be divided into 8 equal parts. -Presuming the `Accuracy_Log` is 7, it defines a space of 128 states. -Divided by 8, each share is 16 large. +The next power of 2 after 5 is 8. +Therefore, the probability space will be divided into 8 equal parts. +Since the probability space is `1<<7 = 128` large, each share is `128/8 = 16` large. -In order to reach 8 shares, 8-5=3 lowest states will count "double", +In order to reach 8 shares, the `8-5 = 3` lowest states will count "double", doubling their shares (32 in width), hence requiring one more bit. -Baseline is assigned starting from the higher states using fewer bits, -increasing at each state, then resuming at the first state, -each state takes its allocated width from Baseline. +Baseline is assigned starting from the lowest state using fewer bits, +continuing in natural state order, looping back at the beginning. +Each state takes its allocated range from Baseline, sized by its `Number_of_Bits`. -| state value | 1 | 39 | 77 | 84 | 122 | | state order | 0 | 1 | 2 | 3 | 4 | | ---------------- | ----- | ----- | ------ | ---- | ------ | +| state value | 1 | 39 | 77 | 84 | 122 | | width | 32 | 32 | 32 | 16 | 16 | | `Number_of_Bits` | 5 | 5 | 5 | 4 | 4 | -| range number | 2 | 4 | 6 | 0 | 1 | +| allocation order | 3 | 4 | 5 | 1 | 2 | | `Baseline` | 32 | 64 | 96 | 0 | 16 | | range | 32-63 | 64-95 | 96-127 | 0-15 | 16-31 | -During decoding, the next state value is determined from current state value, -by reading the required `Number_of_Bits`, and adding the specified `Baseline`. +During decoding, the next state value is determined by using current state value as row number, +then reading the required `Number_of_Bits` from the bitstream, and adding the specified `Baseline`. + +Note: +as a trivial example, it follows that, for a symbol with a Probability of `1`, +`Baseline` is necessarily `0`, and `Number_of_Bits` is necessarily `Accuracy_Log`. -See [Appendix A] for the results of this process applied to the default distributions. +See [Appendix A] to see the outcome of this process applied to the default distributions. [Appendix A]: #appendix-a---decoding-tables-for-predefined-codes @@ -1183,7 +1234,7 @@ Huffman Coding -------------- Zstandard Huffman-coded streams are read backwards, similar to the FSE bitstreams. -Therefore, to find the start of the bitstream, it is therefore to +Therefore, to find the start of the bitstream, it is required to know the offset of the last byte of the Huffman-coded stream. After writing the last bit containing information, the compressor @@ -1219,48 +1270,61 @@ This specification limits maximum code length to 11 bits. #### Representation -All literal values from zero (included) to last present one (excluded) +All literal symbols from zero (included) to last present one (excluded) are represented by `Weight` with values from `0` to `Max_Number_of_Bits`. Transformation from `Weight` to `Number_of_Bits` follows this formula : ``` Number_of_Bits = Weight ? (Max_Number_of_Bits + 1 - Weight) : 0 ``` -The last symbol's `Weight` is deduced from previously decoded ones, -by completing to the nearest power of 2. -This power of 2 gives `Max_Number_of_Bits`, the depth of the current tree. +When a literal symbol is not present, it receives a `Weight` of 0. 
+The least frequent symbol receives a `Weight` of 1. +If no literal has a `Weight` of 1, then the data is considered corrupted. +If there are not at least two literals with non-zero `Weight`, then the data +is considered corrupted. +The most frequent symbol receives a `Weight` anywhere between 1 and 11 (max). +The last symbol's `Weight` is deduced from previously retrieved Weights, +by completing to the nearest power of 2. It's necessarily non 0. +If it's not possible to reach a clean power of 2 with a single `Weight` value, +the Huffman Tree Description is considered invalid. +This final power of 2 gives `Max_Number_of_Bits`, the depth of the current tree. `Max_Number_of_Bits` must be <= 11, otherwise the representation is considered corrupted. __Example__ : Let's presume the following Huffman tree must be described : -| literal value | 0 | 1 | 2 | 3 | 4 | 5 | +| literal symbol | A | B | C | D | E | F | | ---------------- | --- | --- | --- | --- | --- | --- | | `Number_of_Bits` | 1 | 2 | 3 | 0 | 4 | 4 | The tree depth is 4, since its longest elements uses 4 bits -(longest elements are the one with smallest frequency). -Value `5` will not be listed, as it can be determined from values for 0-4, -nor will values above `5` as they are all 0. -Values from `0` to `4` will be listed using `Weight` instead of `Number_of_Bits`. +(longest elements are the ones with smallest frequency). + +All symbols will now receive a `Weight` instead of `Number_of_Bits`. Weight formula is : ``` Weight = Number_of_Bits ? (Max_Number_of_Bits + 1 - Number_of_Bits) : 0 ``` -It gives the following series of weights : +It gives the following series of Weights : + +| literal symbol | A | B | C | D | E | F | +| -------------- | --- | --- | --- | --- | --- | --- | +| `Weight` | 4 | 3 | 2 | 0 | 1 | 1 | + +This list will be sent to the decoder, with the following modifications: -| literal value | 0 | 1 | 2 | 3 | 4 | -| ------------- | --- | --- | --- | --- | --- | -| `Weight` | 4 | 3 | 2 | 0 | 1 | +- `F` will not be listed, because it can be determined from previous symbols +- nor will symbols above `F` as they are all 0 +- on the other hand, all symbols before `A`, starting with `\0`, will be listed, with a Weight of 0. The decoder will do the inverse operation : -having collected weights of literal symbols from `0` to `4`, -it knows the last literal, `5`, is present with a non-zero `Weight`. -The `Weight` of `5` can be determined by advancing to the next power of 2. +having collected weights of literal symbols from `A` to `E`, +it knows the last literal, `F`, is present with a non-zero `Weight`. +The `Weight` of `F` can be determined by advancing to the next power of 2. The sum of `2^(Weight-1)` (excluding 0's) is : `8 + 4 + 2 + 0 + 1 = 15`. Nearest larger power of 2 value is 16. -Therefore, `Max_Number_of_Bits = 4` and `Weight[5] = 16-15 = 1`. +Therefore, `Max_Number_of_Bits = log2(16) = 4` and `Weight[F] = log_2(16 - 15) + 1 = 1`. #### Huffman Tree header @@ -1300,7 +1364,7 @@ sharing a single distribution table. To decode an FSE bitstream, it is necessary to know its compressed size. Compressed size is provided by `headerByte`. It's also necessary to know its _maximum possible_ decompressed size, -which is `255`, since literal values span from `0` to `255`, +which is `255`, since literal symbols span from `0` to `255`, and last symbol's `Weight` is not represented. An FSE bitstream starts by a header, describing probabilities distribution. 
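Recapping the weight-completion rule above in code: a hedged sketch (hypothetical helper, not part of the patch) that reconstructs the last symbol's `Weight` and `Max_Number_of_Bits`, mirroring the `8 + 4 + 2 + 0 + 1 = 15 -> 16` example:

```c
#include <assert.h>
#include <stddef.h>

/* `weights` holds the decoded Weights of all listed symbols (last one excluded).
 * Completes the sum of 2^(Weight-1) to the nearest larger power of 2. */
static unsigned complete_last_weight(const unsigned char* weights, size_t count,
                                     unsigned* max_number_of_bits)
{
    unsigned long total = 0, target = 1, rest;
    unsigned last_weight = 0;
    size_t i;

    for (i = 0; i < count; i++)
        if (weights[i] != 0)
            total += 1UL << (weights[i] - 1);           /* 0 weights contribute nothing */
    while (target <= total) target <<= 1;               /* nearest larger power of 2 */
    rest = target - total;
    /* if `rest` is not itself a power of 2, no single Weight can complete
     * the tree: the Huffman Tree Description is invalid */
    assert(rest != 0 && (rest & (rest - 1)) == 0);
    while ((1UL << last_weight) <= rest) last_weight++; /* log2(rest) + 1 */
    for (*max_number_of_bits = 0; (1UL << *max_number_of_bits) < target; (*max_number_of_bits)++) {}
    return last_weight;   /* example: total=15 -> target=16, weight 1, 4 bits max */
}
```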
@@ -1322,6 +1386,13 @@ If updating state after decoding a symbol would require more bits than remain in the stream, it is assumed that extra bits are 0. Then, symbols for each of the final states are decoded and the process is complete. +If this process would produce more weights than the maximum number of decoded +weights (255), then the data is considered corrupted. + +If either of the 2 initial states are absent or truncated, then the data is +considered corrupted. Consequently, it is not possible to encode fewer than +2 weights using this mode. + #### Conversion from weights to Huffman prefix codes All present symbols shall now have a `Weight` value. @@ -1329,26 +1400,28 @@ It is possible to transform weights into `Number_of_Bits`, using this formula: ``` Number_of_Bits = (Weight>0) ? Max_Number_of_Bits + 1 - Weight : 0 ``` -Symbols are sorted by `Weight`. -Within same `Weight`, symbols keep natural sequential order. +In order to determine which prefix code is assigned to each Symbol, +Symbols are first sorted by `Weight`, then by natural sequential order. Symbols with a `Weight` of zero are removed. -Then, starting from lowest `Weight`, prefix codes are distributed in sequential order. +Then, starting from lowest `Weight` (hence highest `Number_of_Bits`), +prefix codes are assigned in ascending order. __Example__ : -Let's presume the following list of weights has been decoded : +Let's assume the following list of weights has been decoded: -| Literal | 0 | 1 | 2 | 3 | 4 | 5 | +| Literal | A | B | C | D | E | F | | -------- | --- | --- | --- | --- | --- | --- | | `Weight` | 4 | 3 | 2 | 0 | 1 | 1 | Sorted by weight and then natural sequential order, -it gives the following distribution : +it gives the following prefix codes distribution: -| Literal | 3 | 4 | 5 | 2 | 1 | 0 | -| ---------------- | --- | --- | --- | --- | --- | ---- | -| `Weight` | 0 | 1 | 1 | 2 | 3 | 4 | -| `Number_of_Bits` | 0 | 4 | 4 | 3 | 2 | 1 | -| prefix codes | N/A | 0000| 0001| 001 | 01 | 1 | +| Literal | D | E | F | C | B | A | +| ---------------- | --- | ---- | ---- | ---- | ---- | ---- | +| `Weight` | 0 | 1 | 1 | 2 | 3 | 4 | +| `Number_of_Bits` | 0 | 4 | 4 | 3 | 2 | 1 | +| prefix code | N/A | 0000 | 0001 | 001 | 01 | 1 | +| ascending order | N/A | 0000 | 0001 | 001x | 01xx | 1xxx | ### Huffman-coded Streams @@ -1371,10 +1444,10 @@ it's possible to read the bitstream in a __little-endian__ fashion, keeping track of already used bits. Since the bitstream is encoded in reverse order, starting from the end read symbols in forward order. -For example, if the literal sequence "0145" was encoded using above prefix code, +For example, if the literal sequence `ABEF` was encoded using above prefix code, it would be encoded (in reverse order) as: -|Symbol | 5 | 4 | 1 | 0 | Padding | +|Symbol | F | E | B | A | Padding | |--------|------|------|----|---|---------| |Encoding|`0000`|`0001`|`01`|`1`| `00001` | @@ -1669,6 +1742,13 @@ or at least provide a meaningful error code explaining for which reason it canno Version changes --------------- +- 0.4.4 : minor clarification for block size +- 0.4.3 : clarifications for Huffman prefix code assignment example +- 0.4.2 : refactor FSE table construction process, inspired by Donald Pian +- 0.4.1 : clarifications on a few error scenarios, by Eric Lasota +- 0.4.0 : fixed imprecise behavior for nbSeq==0, detected by Igor Pavlov +- 0.3.9 : clarifications for Huffman-compressed literal sizes. +- 0.3.8 : clarifications for Huffman Blocks and Huffman Tree descriptions. 
- 0.3.7 : clarifications for Repeat_Offsets, matching RFC8878 - 0.3.6 : clarifications for Dictionary_ID - 0.3.5 : clarifications for Block_Maximum_Size diff --git a/doc/zstd_manual.html b/doc/zstd_manual.html index cca287b729f..89f0761f492 100644 --- a/doc/zstd_manual.html +++ b/doc/zstd_manual.html @@ -1,16 +1,17 @@ -zstd 1.5.3 Manual +zstd 1.5.7 Manual -

zstd 1.5.3 Manual

+zstd 1.5.7 Manual

    +Note: the content of this file has been automatically generated by parsing "zstd.h"

    Contents

 1. Introduction
 2. Version
-3. Simple API
+3. Simple Core API
 4. Explicit context
 5. Advanced compression API (Requires v1.4.0+)
 6. Advanced decompression API (Requires v1.4.0+)
@@ -22,15 +23,15 @@ zstd 1.5.3 Manual
 12. Dictionary helper functions
 13. Advanced dictionary and prefix API (Requires v1.4.0+)
 14. experimental API (static linking only)
-15. Frame size functions
+15. Frame header and size functions
 16. Memory management
 17. Advanced compression functions
 18. Advanced decompression functions
 19. Advanced streaming functions
-20. Buffer-less and synchronous inner streaming functions
+20. Buffer-less and synchronous inner streaming functions (DEPRECATED)
 21. Buffer-less streaming compression (synchronous mode)
 22. Buffer-less streaming decompression (synchronous mode)
-23. Block level API
+23. Block level API (DEPRECATED)

    Introduction

@@ -74,57 +75,62 @@ zstd 1.5.3 Manual

    Return runtime library version, like "1.4.5". Requires v1.3.0+.


-Simple API
+Simple Core API

    
     
     
    size_t ZSTD_compress( void* dst, size_t dstCapacity,
                     const void* src, size_t srcSize,
                           int compressionLevel);
     

 Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
-  Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+  NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+        enough space to successfully compress the data.
  @return : compressed size written into `dst` (<= `dstCapacity`),
            or an error code if it fails (which can be tested using ZSTD_isError()).


    size_t ZSTD_decompress( void* dst, size_t dstCapacity,
                       const void* src, size_t compressedSize);
-`compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
-`dstCapacity` is an upper bound of originalSize to regenerate.
-If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
-@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
-          or an errorCode if it fails (which can be tested using ZSTD_isError()).
+`compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+Multiple compressed frames can be decompressed at once with this method.
+The result will be the concatenation of all decompressed frames, back to back.
+`dstCapacity` is an upper bound of originalSize to regenerate.
+First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().
+If maximum upper bound isn't known, prefer using streaming mode to decompress data.
+@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+          or an errorCode if it fails (which can be tested using ZSTD_isError()).
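A minimal round trip through this API (sketch, not part of the patch); it uses `ZSTD_getFrameContentSize()`, documented just below, to size the destination buffer:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    const char src[] = "zstd round-trip example: the quick brown fox jumps over the lazy dog";
    size_t const srcSize = sizeof(src);

    /* ZSTD_compressBound() guarantees enough room for any input */
    size_t const dstCapacity = ZSTD_compressBound(srcSize);
    void* const dst = malloc(dstCapacity);
    size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
    if (ZSTD_isError(cSize)) { fprintf(stderr, "%s\n", ZSTD_getErrorName(cSize)); return 1; }

    {   /* single-pass functions always record the content size in the frame header */
        unsigned long long const rSize = ZSTD_getFrameContentSize(dst, cSize);
        if (rSize == ZSTD_CONTENTSIZE_ERROR || rSize == ZSTD_CONTENTSIZE_UNKNOWN) return 1;
        {   void* const out = malloc((size_t)rSize);
            size_t const dSize = ZSTD_decompress(out, (size_t)rSize, dst, cSize);
            if (ZSTD_isError(dSize) || dSize != srcSize || memcmp(out, src, srcSize) != 0) return 1;
            free(out);
        }
    }
    free(dst);
    return 0;
}
```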


+Decompression helper functions


    #define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
     #define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
     unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
-`src` should point to the start of a ZSTD encoded frame.
- `srcSize` must be at least as large as the frame header.
-  hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
- @return : - decompressed size of `src` frame content, if known
-           - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
-           - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
-  note 1 : a 0 return value means the frame is valid but "empty".
-  note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
-           When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
-           In which case, it's necessary to use streaming mode to decompress data.
-           Optionally, application can rely on some implicit limit,
-           as ZSTD_decompress() only needs an upper bound of decompressed size.
-           (For example, data could be necessarily cut into blocks <= 16 KB).
-  note 3 : decompressed size is always present when compression is completed using single-pass functions,
-           such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
-  note 4 : decompressed size can be very large (64-bits value),
-           potentially larger than what local system can handle as a single memory segment.
-           In which case, it's necessary to use streaming mode to decompress data.
-  note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
-           Always ensure return value fits within application's authorized limits.
-           Each application can set its own limits.
-  note 6 : This function replaces ZSTD_getDecompressedSize()
-
-ZSTDLIB_API
-ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
+`src` should point to the start of a ZSTD encoded frame.
+ `srcSize` must be at least as large as the frame header.
+  hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ @return : - decompressed size of `src` frame content, if known
+           - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+           - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+  note 1 : a 0 return value means the frame is valid but "empty".
+           When invoking this method on a skippable frame, it will return 0.
+  note 2 : decompressed size is an optional field, it may not be present (typically in streaming mode).
+           When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+           In which case, it's necessary to use streaming mode to decompress data.
+           Optionally, application can rely on some implicit limit,
+           as ZSTD_decompress() only needs an upper bound of decompressed size.
+           (For example, data could be necessarily cut into blocks <= 16 KB).
+  note 3 : decompressed size is always present when compression is completed using single-pass functions,
+           such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+  note 4 : decompressed size can be very large (64-bits value),
+           potentially larger than what local system can handle as a single memory segment.
+           In which case, it's necessary to use streaming mode to decompress data.
+  note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+           Always ensure return value fits within application's authorized limits.
+           Each application can set its own limits.
+  note 6 : This function replaces ZSTD_getDecompressedSize()
+
+ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
 unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
 
-NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+This function is now obsolete, in favor of ZSTD_getFrameContentSize().
 Both functions work the same way, but ZSTD_getDecompressedSize() blends
 "empty", "unknown" and "error" results to the same return value (0),
 while ZSTD_getFrameContentSize() gives them separate return values.
@@ -136,48 +142,78 @@ zstd 1.5.3 Manual
 `srcSize` must be >= first frame size
 @return : the compressed size of the first frame starting at `src`,
           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
-          or an error code if input is invalid
+          or an error code if input is invalid
+ Note 1: this method is called _find*() because it's not enough to read the header,
+         it may have to scan through the frame's content, to reach its end.
+ Note 2: this method also works with Skippable Frames. In which case,
+         it returns the size of the complete skippable frame,
+         which is always equal to its content size + 8 bytes for headers.
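A short sketch of the typical use of this function: walking a buffer of concatenated frames (hypothetical helper, error handling reduced to a bail-out):

```c
#include <zstd.h>

/* Count the frames in a buffer holding several concatenated frames,
 * using ZSTD_findFrameCompressedSize() to hop from one frame to the next. */
static size_t count_frames(const void* src, size_t srcSize)
{
    const char* p = (const char*)src;
    size_t remaining = srcSize, nbFrames = 0;
    while (remaining > 0) {
        size_t const frameSize = ZSTD_findFrameCompressedSize(p, remaining);
        if (ZSTD_isError(frameSize)) return nbFrames;   /* invalid input: stop */
        p += frameSize;
        remaining -= frameSize;
        nbFrames++;
    }
    return nbFrames;
}
```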


+
+Compression helper functions


+#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)
    +#define ZSTD_COMPRESSBOUND(srcSize)   (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
    +size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
    +

  maximum compressed size in worst case single-pass scenario.
+ When invoking `ZSTD_compress()`, or any other one-pass compression function,
+ it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize),
+ as it eliminates one potential failure scenario,
+ aka not enough room in dst buffer to write the compressed frame.
+ Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE .
+        In which case, ZSTD_compressBound() will return an error code
+        which can be tested using ZSTD_isError().
+
+ ZSTD_COMPRESSBOUND() :
+ same as ZSTD_compressBound(), but as a macro.
+ It can be used to produce constants, which can be useful for static allocation,
+ for example to size a static array on stack.
+ Will produce constant value 0 if srcSize is too large.
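A sketch showing both forms together (SRC_MAX and the helper name are illustrative):

    #define SRC_MAX (4 << 10)
    static unsigned char dstBuf[ZSTD_COMPRESSBOUND(SRC_MAX)];   /* macro form: compile-time constant */

    static size_t compress_one(const void* src, size_t srcSize, int level)
    {
        size_t const bound = ZSTD_compressBound(srcSize);       /* function form: checkable at runtime */
        if (ZSTD_isError(bound) || bound > sizeof(dstBuf))
            return bound;                                       /* srcSize too large for the static buffer */
        return ZSTD_compress(dstBuf, sizeof(dstBuf), src, srcSize, level);
    }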


-Helper functions

    #define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
    -size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
    -unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
    -const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
    -int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed, requires v1.4.0+ */
    -int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
    -int         ZSTD_defaultCLevel(void);           /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
+Error helper functions

    /* ZSTD_isError() :
    + * Most ZSTD_* functions returning a size_t value can be tested for error,
    + * using ZSTD_isError().
    + * @return 1 if error, 0 otherwise
    + */
    +unsigned     ZSTD_isError(size_t result);      /*!< tells if a `size_t` function result is an error code */
    +ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */
    +const char*  ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */
    +int          ZSTD_minCLevel(void);             /*!< minimum negative compression level allowed, requires v1.4.0+ */
    +int          ZSTD_maxCLevel(void);             /*!< maximum compression level available */
    +int          ZSTD_defaultCLevel(void);         /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
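A minimal error-handling sketch; ZSTD_getErrorCode() and the ZSTD_error_* enum live in zstd_errors.h (helper name hypothetical):

    #include <stdio.h>
    #include <zstd.h>
    #include <zstd_errors.h>   /* ZSTD_getErrorCode(), ZSTD_error_* */

    static size_t compress_checked(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
    {
        size_t const r = ZSTD_compress(dst, dstCapacity, src, srcSize, ZSTD_defaultCLevel());
        if (ZSTD_isError(r)) {
            fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(r));         /* readable message */
            if (ZSTD_getErrorCode(r) == ZSTD_error_dstSize_tooSmall) {}  /* programmatic check */
        }
        return r;
    }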
     

    Explicit context

    
     
     

    Compression context

      When compressing many times,
    -  it is recommended to allocate a context just once,
    -  and re-use it for each successive compression operation.
    -  This will make workload friendlier for system's memory.
    +  it is recommended to allocate a compression context just once,
    +  and reuse it for each successive compression operation.
+  This will make the workload friendlier for the system's memory.
       Note : re-using context is just a speed / resource optimization.
              It doesn't change the compression ratio, which remains identical.
    -  Note 2 : In multi-threaded environments,
    -         use one different context per thread for parallel execution.
    +  Note 2: For parallel execution in multi-threaded environments,
+         use one different context per thread.
      
     
    typedef struct ZSTD_CCtx_s ZSTD_CCtx;
     ZSTD_CCtx* ZSTD_createCCtx(void);
    -size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);  /* accept NULL pointer */
    +size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);  /* compatible with NULL pointer */
     

    size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                              int compressionLevel);
     

  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
- Important : in order to behave similarly to `ZSTD_compress()`,
- this function compresses at requested compression level,
- __ignoring any other parameter__ .
+ Important : in order to mirror `ZSTD_compress()` behavior,
+ this function compresses at the requested compression level,
+ __ignoring any other advanced parameter__ .
  If any advanced parameter was set using the advanced API,
- they will all be reset. Only `compressionLevel` remains.
+ they will all be reset. Only @compressionLevel remains.
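A sketch of the recommended reuse pattern (one context, many compressions; helper name hypothetical):

    static int compress_many(ZSTD_CCtx* cctx, int level,
                             void* dst, size_t dstCap,
                             const void* const* srcs, const size_t* sizes, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            size_t const r = ZSTD_compressCCtx(cctx, dst, dstCap, srcs[i], sizes[i], level);
            if (ZSTD_isError(r)) return -1;
            /* ... persist r bytes from dst before the next iteration ... */
        }
        return 0;   /* same cctx reused throughout, per the recommendation above */
    }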


    Decompression context

      When decompressing many times,
       it is recommended to allocate a context only once,
    -  and re-use it for each successive compression operation.
+  and reuse it for each successive decompression operation.
       This will make workload friendlier for system's memory.
       Use one context per thread for parallel execution. 
     
    typedef struct ZSTD_DCtx_s ZSTD_DCtx;
@@ -189,7 +225,7 @@ Decompression context
                          const void* src, size_t srcSize);
     

  Same as ZSTD_decompress(),
  requires an allocated ZSTD_DCtx.
- Compatible with sticky parameters.
+ Compatible with sticky parameters (see below).


@@ -273,6 +309,19 @@ Decompression context
                                   * The higher the value of selected strategy, the more complex it is,
                                   * resulting in stronger and slower compression.
                                   * Special: value 0 means "use default strategy". */
    +
    +    ZSTD_c_targetCBlockSize=130, /* v1.5.6+
    +                                  * Attempts to fit compressed block size into approximately targetCBlockSize.
    +                                  * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX.
    +                                  * Note that it's not a guarantee, just a convergence target (default:0).
    +                                  * No target when targetCBlockSize == 0.
    +                                  * This is helpful in low bandwidth streaming environments to improve end-to-end latency,
    +                                  * when a client can make use of partial documents (a prominent example being Chrome).
    +                                  * Note: this parameter is stable since v1.5.6.
    +                                  * It was present as an experimental parameter in earlier versions,
+                                  * but using it with earlier library versions is not recommended
    +                                  * due to massive performance regressions.
    +                                  */
         /* LDM mode parameters */
         ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
                                          * This parameter is designed to improve compression ratio
@@ -352,16 +401,18 @@ Decompression context
          * ZSTD_c_forceMaxWindow
          * ZSTD_c_forceAttachDict
          * ZSTD_c_literalCompressionMode
    -     * ZSTD_c_targetCBlockSize
          * ZSTD_c_srcSizeHint
          * ZSTD_c_enableDedicatedDictSearch
          * ZSTD_c_stableInBuffer
          * ZSTD_c_stableOutBuffer
          * ZSTD_c_blockDelimiters
          * ZSTD_c_validateSequences
    -     * ZSTD_c_useBlockSplitter
    +     * ZSTD_c_blockSplitterLevel
    +     * ZSTD_c_splitAfterSequences
          * ZSTD_c_useRowMatchFinder
          * ZSTD_c_prefetchCDictTables
    +     * ZSTD_c_enableSeqProducerFallback
    +     * ZSTD_c_maxBlockSize
          * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
          * note : never ever use experimentalParam? names directly;
          *        also, the enums values themselves are unstable and can still change.
@@ -371,7 +422,7 @@ Decompression context
          ZSTD_c_experimentalParam3=1000,
          ZSTD_c_experimentalParam4=1001,
          ZSTD_c_experimentalParam5=1002,
    -     ZSTD_c_experimentalParam6=1003,
    +     /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */
          ZSTD_c_experimentalParam7=1004,
          ZSTD_c_experimentalParam8=1005,
          ZSTD_c_experimentalParam9=1006,
@@ -381,7 +432,11 @@ Decompression context
          ZSTD_c_experimentalParam13=1010,
          ZSTD_c_experimentalParam14=1011,
          ZSTD_c_experimentalParam15=1012,
    -     ZSTD_c_experimentalParam16=1013
    +     ZSTD_c_experimentalParam16=1013,
    +     ZSTD_c_experimentalParam17=1014,
    +     ZSTD_c_experimentalParam18=1015,
    +     ZSTD_c_experimentalParam19=1016,
    +     ZSTD_c_experimentalParam20=1017
     } ZSTD_cParameter;
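Parameters from this enum are applied with ZSTD_CCtx_setParameter(); a sketch enabling the newly stabilized block-size target (values are illustrative):

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx != NULL) {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
        /* cap compressed block size to bound end-to-end latency on thin pipes;
         * must lie within [ZSTD_TARGETCBLOCKSIZE_MIN, ZSTD_TARGETCBLOCKSIZE_MAX] */
        size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 16 << 10);
        if (ZSTD_isError(r)) { /* parameter rejected: out of bounds, or library too old */ }
    }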
     

    typedef struct {
@@ -444,7 +499,7 @@ Decompression context
                       They will be used to compress next frame.
                       Resetting session never fails.
       - The parameters : changes all parameters back to "default".
    -                  This removes any reference to any dictionary too.
    +                  This also removes any reference to any dictionary or external sequence producer.
                       Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
                       otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
       - Both : similar to resetting the session, followed by resetting parameters.
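The three reset directives in practice (a sketch; cctx assumed allocated):

    /* Session reset: forgets any unfinished frame, keeps sticky parameters and dictionary. */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);

    /* Full reset: also returns all parameters to default and drops dictionary references. */
    {   size_t const r = ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
        if (ZSTD_isError(r)) { /* parameter reset fails if a compression job is ongoing */ }
    }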
@@ -455,11 +510,13 @@ Decompression context
                            void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize);
     

  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ (note that this entry point doesn't even expose a compression level parameter).
  ZSTD_compress2() always starts a new frame.
  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
  - The function is always blocking, returns when compression is completed.
- Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+       enough space to successfully compress the data, though it is possible it fails for other reasons.
  @return : compressed size written into `dst` (<= `dstCapacity`),
            or an error code if it fails (which can be tested using ZSTD_isError()).
@@ -483,13 +540,17 @@ Decompression context
          * ZSTD_d_stableOutBuffer
          * ZSTD_d_forceIgnoreChecksum
          * ZSTD_d_refMultipleDDicts
    +     * ZSTD_d_disableHuffmanAssembly
    +     * ZSTD_d_maxBlockSize
          * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
          * note : never ever use experimentalParam? names directly
          */
          ZSTD_d_experimentalParam1=1000,
          ZSTD_d_experimentalParam2=1001,
          ZSTD_d_experimentalParam3=1002,
    -     ZSTD_d_experimentalParam4=1003
    +     ZSTD_d_experimentalParam4=1003,
    +     ZSTD_d_experimentalParam5=1004,
    +     ZSTD_d_experimentalParam6=1005
     
     } ZSTD_dParameter;
     

@@ -537,14 +598,14 @@ Decompression context
       A ZSTD_CStream object is required to track streaming operation.
       Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
       ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
    -  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
+  It is recommended to reuse ZSTD_CStream since it will play nicer with system's memory, by reusing already allocated memory.
     
       For parallel execution, use one separate ZSTD_CStream per thread.
     
       note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
     
       Parameters are sticky : when starting a new compression on the same context,
    -  it will re-use the same sticky parameters as previous compression session.
    +  it will reuse the same sticky parameters as previous compression session.
       When in doubt, it's recommended to fully initialize the context before usage.
       Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
       ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
@@ -635,6 +696,11 @@ Streaming compression functions
                 only ZSTD_e_end or ZSTD_e_flush operations are allowed.
                 Before starting a new compression job, or changing compression parameters,
                 it is required to fully flush internal buffers.
    +  - note: if an operation ends with an error, it may leave @cctx in an undefined state.
+          Therefore, it's UB to invoke ZSTD_compressStream2() or ZSTD_compressStream() on such a state.
+          In order to be used again after an error, a state must be reset,
    +          which can be done explicitly (ZSTD_CCtx_reset()),
    +          or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())
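A minimal streaming-compression loop for an in-memory input (helper and sink names hypothetical; output buffer sized e.g. with ZSTD_CStreamOutSize()):

    static size_t stream_compress_all(ZSTD_CCtx* cctx,
                                      const void* src, size_t srcSize,
                                      void* outBuf, size_t outCap,
                                      void (*sink)(const void*, size_t))
    {
        ZSTD_inBuffer input = { src, srcSize, 0 };
        size_t remaining;
        do {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
            if (ZSTD_isError(remaining)) return remaining;   /* cctx now needs a reset */
            sink(outBuf, output.pos);                        /* consume produced bytes */
        } while (remaining != 0);                            /* 0 means frame fully flushed */
        return 0;
    }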
      
     


@@ -658,13 +724,16 @@ Streaming compression functions
          ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
          ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
          ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
    +
    + Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API
    + to compress with a dictionary.
      
     


    Streaming decompression - HowTo

       A ZSTD_DStream object is required to track streaming operations.
       Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
    -  ZSTD_DStream objects can be re-used multiple times.
+  ZSTD_DStream objects can be reused multiple times.
     
       Use ZSTD_initDStream() to start a new decompression operation.
      @return : recommended first input size
@@ -674,16 +743,21 @@ Streaming compression functions
       The function will update both `pos` fields.
       If `input.pos < input.size`, some input has not been consumed.
       It's up to the caller to present again remaining data.
    +
       The function tries to flush all data decoded immediately, respecting output buffer size.
       If `output.pos < output.size`, decoder has flushed everything it could.
    -  But if `output.pos == output.size`, there might be some data left within internal buffers.,
    +
    +  However, when `output.pos == output.size`, it's more difficult to know.
    +  If @return > 0, the frame is not complete, meaning
    +  either there is still some data left to flush within internal buffers,
    +  or there is more input to read to complete the frame (or both).
       In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
       Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
      @return : 0 when a frame is completely decoded and fully flushed,
             or an error code, which can be tested using ZSTD_isError(),
             or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
                                     the return value is a suggested next input size (just a hint for better latency)
    -                                that will never request more than the remaining frame size.
    +                                that will never request more than the remaining content of the compressed frame.
      
     
@@ -693,6 +767,40 @@ ZSTD_DStream management functions
 ZSTD_DStream* ZSTD_createDStream(void);
     size_t ZSTD_freeDStream(ZSTD_DStream* zds);  /* accept NULL pointer */
     

    Streaming decompression functions


+size_t ZSTD_initDStream(ZSTD_DStream* zds);

+ Initialize/reset DStream state for new decompression operation.
+ Call before new decompression operation using same DStream.
+
+ Note : This function is redundant with the advanced API and equivalent to:
+     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+     ZSTD_DCtx_refDDict(zds, NULL);


+size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);

+ Streaming decompression function.
+ Call repetitively to consume full input updating it as necessary.
+ Function will update both input and output `pos` fields exposing current state via these fields:
+ - `input.pos < input.size`, some input remaining and caller should provide remaining input
+   on the next call.
+ - `output.pos < output.size`, decoder flushed internal output buffer.
+ - `output.pos == output.size`, unflushed data potentially present in the internal buffers,
+   check ZSTD_decompressStream() @return value,
+   if > 0, invoke it again to flush remaining data to output.
+ Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.
+
+ @return : 0 when a frame is completely decoded and fully flushed,
+           or an error code, which can be tested using ZSTD_isError(),
+           or any other value > 0, which means there is some decoding or flushing to do to complete current frame.
+
+ Note: when an operation returns with an error code, the @zds state may be left in undefined state.
+       It's UB to invoke `ZSTD_decompressStream()` on such a state.
+       In order to reuse such a state, it must first be reset,
+       which can be done explicitly (`ZSTD_DCtx_reset()`),
+       or is implied by operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)
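A minimal decompression loop following the contract above (helper and sink names hypothetical; assumes `src` holds one complete frame):

    static size_t stream_decompress_all(ZSTD_DCtx* dctx,
                                        const void* src, size_t srcSize,
                                        void* outBuf, size_t outCap,
                                        void (*sink)(const void*, size_t))
    {
        ZSTD_inBuffer input = { src, srcSize, 0 };
        size_t ret;
        do {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            ret = ZSTD_decompressStream(dctx, &output, &input);
            if (ZSTD_isError(ret)) return ret;      /* dctx now needs a reset */
            sink(outBuf, output.pos);               /* consume flushed data */
            if (output.pos == 0 && input.pos == input.size)
                break;                              /* truncated frame: no further progress possible */
        } while (ret != 0);                         /* 0 == frame fully decoded and flushed */
        return ret;
    }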


+size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
     

    size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
@@ -807,9 +915,11 @@ Streaming decompression functions


    Advanced dictionary and prefix API (Requires v1.4.0+)

      This API allows dictionaries to be used with ZSTD_compress2(),
    - ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and
    - only reset with the context is reset with ZSTD_reset_parameters or
    - ZSTD_reset_session_and_parameters. Prefixes are single-use.
    + ZSTD_compressStream2(), and ZSTD_decompressDCtx().
    + Dictionaries are sticky, they remain valid when same context is reused,
    + they only reset when the context is reset
    + with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters.
    + In contrast, Prefixes are single-use.
     
    size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
@@ -818,8 +928,9 @@ Streaming decompression functions


  @result : 0, or an error code (which can be tested with ZSTD_isError()).
  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
           meaning "return to no-dictionary mode".
- Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
-          To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ Note 1 : Dictionary is sticky, it will be used for all future compressed frames,
+          until parameters are reset, a new dictionary is loaded, or the dictionary
+          is explicitly invalidated by loading a NULL dictionary.
  Note 2 : Loading a dictionary involves building tables.
           It's also a CPU consuming operation, with non-negligible impact on latency.
           Tables are dependent on compression parameters, and for this reason,
@@ -828,11 +939,15 @@ Streaming decompression functions


           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
           In such a case, dictionary buffer must outlive its users.
  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
-          to precisely select how dictionary content must be interpreted.
+          to precisely select how dictionary content must be interpreted.
+ Note 5 : This method does not benefit from LDM (long distance mode).
+          If you want to employ LDM on some large dictionary content,
+          prefer employing ZSTD_CCtx_refPrefix() described below.
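A sketch of the sticky-dictionary pattern (helper name hypothetical):

    static size_t compress_with_dict(ZSTD_CCtx* cctx,
                                     const void* dict, size_t dictSize,
                                     void* dst, size_t dstCap,
                                     const void* src, size_t srcSize)
    {
        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
        size_t const r = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);  /* sticky from here on */
        if (ZSTD_isError(r)) return r;
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);  /* uses the loaded dictionary */
    }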


    size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
-Reference a prepared dictionary, to be used for all next compressed frames.

+Reference a prepared dictionary, to be used for all future compressed frames.
  Note that compression parameters are enforced from within CDict,
  and supersede any compression parameter previously set within CCtx.
  The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
@@ -852,6 +967,7 @@ Streaming decompression functions


  Decompression will need same prefix to properly regenerate data.
  Compressing with a prefix is similar in outcome as performing a diff and compressing it,
  but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ This method is compatible with LDM (long distance mode).
  @result : 0, or an error code (which can be tested with ZSTD_isError()).
  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
  Note 1 : Prefix buffer is referenced. It **must** outlive compression.
@@ -867,9 +983,9 @@ Streaming decompression functions



    size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-Create an internal DDict from dict buffer,
- to be used to decompress next frames.
- The dictionary remains valid for all future frames, until explicitly invalidated.

+Create an internal DDict from dict buffer, to be used to decompress all future frames.
+ The dictionary remains valid for all future frames, until explicitly invalidated, or
+ a new dictionary is loaded.
  @result : 0, or an error code (which can be tested with ZSTD_isError()).
  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
            meaning "return to no-dictionary mode".
@@ -893,9 +1009,10 @@ Streaming decompression functions


  The memory for the table is allocated on the first call to refDDict, and can be
  freed with ZSTD_freeDCtx().
+ If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary
+ will be managed, and referencing a dictionary effectively "discards" any previous one.
+
  @result : 0, or an error code (which can be tested with ZSTD_isError()).
- Note 1 : Currently, only one dictionary can be managed.
-          Referencing a new dictionary effectively "discards" any previous one.
  Special: referencing a NULL DDict means "return to no-dictionary mode".
  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
@@ -971,7 +1088,7 @@ Streaming decompression functions


  * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
  * 'rep', but repeat offsets do not necessarily need to be calculated from an external
- * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * sequence provider perspective. For example, ZSTD_compressSequences() does not
  * use this 'rep' field at all (as of now).
  */
 } ZSTD_Sequence;
@@ -1076,16 +1193,16 @@ Streaming decompression functions


    } ZSTD_literalCompressionMode_e;

    typedef enum {
    -  /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
    -   * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
    -   * or ZSTD_ps_disable allow for a force enable/disable the feature.
    +  /* Note: This enum controls features which are conditionally beneficial.
    +   * Zstd can take a decision on whether or not to enable the feature (ZSTD_ps_auto),
+   * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable force-enables or force-disables the feature.
        */
       ZSTD_ps_auto = 0,         /* Let the library automatically determine whether the feature shall be enabled */
       ZSTD_ps_enable = 1,       /* Force-enable the feature */
       ZSTD_ps_disable = 2       /* Do not use the feature */
    -} ZSTD_paramSwitch_e;
    +} ZSTD_ParamSwitch_e;
     

-Frame size functions

    
+Frame header and size functions

    
     
     
    ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
     

 `src` should point to the start of a series of ZSTD encoded and/or skippable frames
@@ -1126,29 +1243,126 @@ Streaming decompression functions



    ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
-srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.

+srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX.
 @return : size of the Frame Header,
           or an error code (if srcSize is too small)


+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e;

+typedef struct {
    +    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
    +    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
    +    unsigned blockSizeMax;
    +    ZSTD_FrameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
    +    unsigned headerSize;
    +    unsigned dictID;                     /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */
    +    unsigned checksumFlag;
    +    unsigned _reserved1;
    +    unsigned _reserved2;
    +} ZSTD_FrameHeader;
    +

+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize);
    +/*! ZSTD_getFrameHeader_advanced() :
    + *  same as ZSTD_getFrameHeader(),
    + *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
    +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
    +

+ decode Frame Header into `zfhPtr`, or requires larger `srcSize`.
+ @return : 0 => header is complete, `zfhPtr` is correctly filled,
+          >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount, `zfhPtr` is not filled,
+          or an error code, which can be tested using ZSTD_isError()
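A usage sketch (static-only API: compile with ZSTD_STATIC_LINKING_ONLY defined; helper name hypothetical):

    static int inspect_frame(const void* src, size_t srcSize)
    {
        ZSTD_FrameHeader zfh;
        size_t const r = ZSTD_getFrameHeader(&zfh, src, srcSize);
        if (ZSTD_isError(r)) return -1;   /* not a valid frame prefix */
        if (r > 0) return (int)r;         /* need at least r bytes to decode the header */
        if (zfh.frameType == ZSTD_skippableFrame) {
            /* zfh.frameContentSize = skippable content size, zfh.dictID = magic variant */
        } else {
            /* zfh.frameContentSize, zfh.windowSize, zfh.checksumFlag are now valid */
        }
        return 0;
    }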


+ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize);

+ Zstd supports in-place decompression, where the input and output buffers overlap.
+ In this case, the output buffer must be at least (Margin + Output_Size) bytes large,
+ and the input buffer must be at the end of the output buffer.
+
+  _______________________ Output Buffer ________________________
+ |                                                              |
+ |                                        ____ Input Buffer ____|
+ |                                       |                      |
+ v                                       v                      v
+ |---------------------------------------|-----------|----------|
+ ^                                                   ^          ^
+ |___________________ Output_Size ___________________|_ Margin _|
+
+ NOTE: See also ZSTD_DECOMPRESSION_MARGIN().
+ NOTE: This applies only to single-pass decompression through ZSTD_decompress() or
+       ZSTD_decompressDCtx().
+ NOTE: This function supports multi-frame input.
+
+ @param src The compressed frame(s)
+ @param srcSize The size of the compressed frame(s)
+ @returns The decompression margin or an error that can be checked with ZSTD_isError().
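A sketch of the in-place layout above (helper name hypothetical; originalSize assumed known, e.g. from ZSTD_getFrameContentSize()):

    #include <stdlib.h>
    #include <string.h>

    static size_t decompress_in_place(const void* cSrc, size_t cSrcSize, size_t originalSize)
    {
        size_t const margin = ZSTD_decompressionMargin(cSrc, cSrcSize);
        if (ZSTD_isError(margin)) return margin;
        size_t const bufSize = originalSize + margin;
        unsigned char* const buf = malloc(bufSize);
        if (buf == NULL) return (size_t)-1;
        memcpy(buf + bufSize - cSrcSize, cSrc, cSrcSize);   /* input at the very end */
        size_t const dSize = ZSTD_decompress(buf, originalSize,
                                             buf + bufSize - cSrcSize, cSrcSize);
        /* on success, buf[0..dSize) holds the decompressed data */
        free(buf);
        return dSize;
    }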


+#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)(                                              \
    +        ZSTD_FRAMEHEADERSIZE_MAX                                                              /* Frame header */ + \
    +        4                                                                                         /* checksum */ + \
    +        ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \
    +        (blockSize)                                                                    /* One block of margin */   \
    +    ))
    +

+ Similar to ZSTD_decompressionMargin(), but instead of computing the margin from
+ the compressed frame, compute it from the original size and the blockSizeLog.
+ See ZSTD_decompressionMargin() for details.
+
+ WARNING: This macro does not support multi-frame input, the input must be a single
+ zstd frame. If you need that support use the function, or implement it yourself.
+
+ @param originalSize The original uncompressed size of the data.
+ @param blockSize    The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX).
+                     Unless you explicitly set the windowLog smaller than
+                     ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX.


    +
    typedef enum {
    -  ZSTD_sf_noBlockDelimiters = 0,         /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
    -  ZSTD_sf_explicitBlockDelimiters = 1    /* Representation of ZSTD_Sequence contains explicit block delimiters */
    -} ZSTD_sequenceFormat_e;
    +  ZSTD_sf_noBlockDelimiters = 0,         /* ZSTD_Sequence[] has no block delimiters, just sequences */
    +  ZSTD_sf_explicitBlockDelimiters = 1    /* ZSTD_Sequence[] contains explicit block delimiters */
    +} ZSTD_SequenceFormat_e;
     

-Generate sequences using ZSTD_compress2(), given a source buffer.
    Generate sequences using ZSTD_compress2(), given a source buffer. +

+ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);

+ `srcSize` : size of the input buffer
+ @return : upper-bound for the number of sequences that can be generated
+           from a buffer of srcSize bytes
+
+ note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).


+ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()")
    +ZSTDLIB_STATIC_API size_t
    +ZSTD_generateSequences(ZSTD_CCtx* zc,
    +           ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
    +           const void* src, size_t srcSize);
    +

+ WARNING: This function is meant for debugging and informational purposes ONLY!
+ Its implementation is flawed, and it will be deleted in a future version.
+ It is not guaranteed to succeed, as there are several cases where it will give
+ up and fail. You should NOT use this function in production code.
+
+ This function is deprecated, and will be removed in a future version.
+
+ Generate sequences using ZSTD_compress2(), given a source buffer.
+
+ @param zc The compression context to be used for ZSTD_compress2(). Set any
+           compression parameters you need on this context.
+ @param outSeqs The output sequences buffer of size @p outSeqsSize
+ @param outSeqsCapacity The size of the output sequences buffer.
+                        ZSTD_sequenceBound(srcSize) is an upper bound on the number
+                        of sequences that can be generated.
+ @param src The source buffer to generate sequences from of size @p srcSize.
+ @param srcSize The size of the source buffer.
+
  Each block will end with a dummy sequence
  with offset == 0, matchLength == 0, and litLength == length of last literals.
  litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
  simply acts as a block delimiter.
- @zc can be used to insert custom compression params.
- This function invokes ZSTD_compress2().
-
- The output of this function can be fed into ZSTD_compressSequences() with CCtx
- setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
- @return : number of sequences generated
+ @returns The number of sequences generated, necessarily less than
+          ZSTD_sequenceBound(srcSize), or an error code that can be checked
+          with ZSTD_isError().


@@ -1166,13 +1380,14 @@ Streaming decompression functions



    ZSTDLIB_STATIC_API size_t
    -ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
    -            const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
    -            const void* src, size_t srcSize);
    +ZSTD_compressSequences(ZSTD_CCtx* cctx,
    +           void* dst, size_t dstCapacity,
    +     const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
    +     const void* src, size_t srcSize);
     

  Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
  @src contains the entire input (not just the literals).
  If @srcSize > sum(sequence.length), the remaining bytes are considered all literals
- If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).
  The entire source is compressed into a single frame.
  The compression behavior changes based on cctx params. In particular:
@@ -1181,11 +1396,17 @@ Streaming decompression functions


     the block size derived from the cctx, and sequences may be split. This is the default setting.
     If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
-    block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+    valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+
+    When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide whether to generate repcodes
+    using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit
+    can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation.
+    By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10).
+    ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.
+
-    If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
-    behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for
-    specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
+    If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined
+    behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for
+    specifics regarding offset/matchlength requirements) and then bail out and return an error.
     In addition to the two adjustable experimental params, there are other important cctx params.
     - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
@@ -1193,21 +1414,47 @@ Streaming decompression functions


     - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
       is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
-    Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
-    Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
-            and cannot emit an RLE block that disagrees with the repcode history
+    Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused.
+    Dev Note: Once the ability to ingest repcodes becomes available, the explicit block delims mode must respect those repcodes exactly,
+              and cannot emit an RLE block that disagrees with the repcode history.
+    @return : final compressed size, or a ZSTD error code.


+ZSTDLIB_STATIC_API size_t
    +ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
    +                      void* dst, size_t dstCapacity,
    +                const ZSTD_Sequence* inSeqs, size_t nbSequences,
    +                const void* literals, size_t litSize, size_t litBufCapacity,
    +                size_t decompressedSize);
    +

+ This is a variant of ZSTD_compressSequences() which,
+ instead of receiving (src,srcSize) as input parameter, receives (literals,litSize),
+ aka all the literals, already extracted and laid out into a single continuous buffer.
+ This can be useful if the process generating the sequences also happens to generate the buffer of literals,
+ thus skipping an extraction + caching stage.
+ It's a speed optimization, useful when the right conditions are met,
+ but it also features the following limitations:
+ - Only supports explicit delimiter mode
+ - Currently does not support Sequences validation (so input Sequences are trusted)
+ - Not compatible with frame checksum, which must be disabled
+ - If any block is incompressible, will fail and return an error
+ - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error.
+ - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals.
+   @litBufCapacity must be at least 8 bytes larger than @litSize.
+ - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.
+ @return : final compressed size, or a ZSTD error code.


    ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
    -                                const void* src, size_t srcSize, unsigned magicVariant);
    +                                 const void* src, size_t srcSize,
    +                                       unsigned magicVariant);
     

  Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
+
  Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
  ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
- As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
- the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ As such, the parameter magicVariant controls the exact skippable frame magic number variant used,
+ so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+
  Returns an error if destination buffer is not large enough, if the source size is not representable
  with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
@@ -1216,13 +1463,14 @@ Streaming decompression functions



-size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
-                                const void* src, size_t srcSize);
-
-Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.

    ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity,
    +                                      unsigned* magicVariant,
    +                                      const void* src, size_t srcSize);
    +

+Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer.
+
- The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
- i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
- in the magicVariant.
+ The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written,
+ i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START.
+ This can be NULL if the caller is not interested in the magicVariant.
+
  Returns an error if destination buffer is not large enough, or if the frame is not skippable.
@@ -1230,58 +1478,69 @@ Streaming decompression functions



-unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
+ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
     

    Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
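A round-trip sketch exercising all three skippable-frame functions (static-only API: compile with ZSTD_STATIC_LINKING_ONLY defined; helper name hypothetical):

    static int skippable_roundtrip(void)
    {
        unsigned char frame[128];
        const char meta[] = "app-metadata";
        size_t const fSize = ZSTD_writeSkippableFrame(frame, sizeof(frame), meta, sizeof(meta), 7);
        if (ZSTD_isError(fSize)) return -1;               /* dst too small, or magicVariant > 15 */
        if (!ZSTD_isSkippableFrame(frame, fSize)) return -1;
        char content[64];
        unsigned variant = 0;
        size_t const cSize = ZSTD_readSkippableFrame(content, sizeof(content), &variant, frame, fSize);
        if (ZSTD_isError(cSize)) return -1;
        return (cSize == sizeof(meta) && variant == 7) ? 0 : -1;
    }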


    Memory management

    
     
-ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int maxCompressionLevel);
     ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
     ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
     ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
     

  These functions make it possible to estimate memory usage of a future {D,C}Ctx, before its creation.
+ This is useful in combination with ZSTD_initStatic(),
+ which makes it possible to employ a static buffer for ZSTD_CCtx* state.
+
  ZSTD_estimateCCtxSize() will provide a memory budget large enough
- for any compression level up to selected one.
- Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
-        does not include space for a window buffer.
-        Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
+ to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2()
+ associated with any compression level up to max specified one.
  The estimate will assume the input may be arbitrarily large, which is the worst case.
+
+ Note that the size estimation is specific for one-shot compression,
+ it is not valid for streaming (see ZSTD_estimateCStreamSize*())
+ nor other potential ways of using a ZSTD_CCtx* state.
+
  When srcSize can be bound by a known and rather "small" value,
- this fact can be used to provide a tighter estimation
- because the CCtx compression context will need less memory.
- This tighter estimation can be provided by more advanced functions
+ this knowledge can be used to provide a tighter budget estimation
+ because the ZSTD_CCtx* state will need less memory for small inputs.
+ This tighter estimation can be provided by employing more advanced functions
  ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
  and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
  Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
- Note 2 : only single-threaded compression is supported.
+ Note : only single-threaded compression is supported.
  ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
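A static-allocation sketch combining the estimate with ZSTD_initStaticCCtx() (static-only API; workspace size is an assumption, not a recommendation):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static char workspace[1 << 20];   /* assumed large enough for the chosen max level */

    static ZSTD_CCtx* make_static_cctx(int maxLevel)
    {
        size_t const need = ZSTD_estimateCCtxSize(maxLevel);
        if (ZSTD_isError(need) || need > sizeof(workspace)) return NULL;
        return ZSTD_initStaticCCtx(workspace, sizeof(workspace));  /* NULL if workspace too small */
    }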


-ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int maxCompressionLevel);
     ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
     ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
    -ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
    +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t maxWindowSize);
     ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
-ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
- It will also consider src size to be arbitrarily "large", which is worst case.

+ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression
+ using any compression level up to the max specified one.
+ It will also consider src size to be arbitrarily "large", which is a worst case scenario.
  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
  Only single-threaded compression is supported.
  Note : CStream size estimation is only correct for single-threaded compression.
- ZSTD_DStream memory budget depends on window Size.
+ ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+ Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
+ Size estimates assume that no external sequence producer is registered.
+
+ ZSTD_DStream memory budget depends on frame's window Size.
  This information can be passed manually, using ZSTD_estimateDStreamSize,
  or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
+ Any frame requesting a window size larger than max specified one will be rejected.
  Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
         an internal ?Dict will be created, which additional size is not estimated here.
-        In this case, get total size by adding ZSTD_estimate?DictSize
+        In this case, get total size by adding ZSTD_estimate?DictSize


    ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
@@ -1326,13 +1585,14 @@ Streaming decompression functions


-#ifdef __GNUC__
-__attribute__((__unused__))
-#endif
-ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */

    These prototypes make it possible to pass your own allocation/free functions. ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below. All allocation/free operations will be completed using these custom variants instead of regular ones.


+ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */

    typedef struct POOL_ctx_s ZSTD_threadPool;
     ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
     ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
@@ -1381,20 +1641,41 @@ Streaming decompression functions


    This function never fails (wide contract)


-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_compress2")
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);

+ Set all parameters provided within @p cparams into the working @p cctx.
+ Note : if modifying parameters during compression (MT mode only),
+        note that changes to the .windowLog parameter will be ignored.
+ @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+         On failure, no parameters are updated.
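A sketch pairing this with ZSTD_getCParams() from the static-only API (helper name hypothetical):

    static size_t apply_level_for_size(ZSTD_CCtx* cctx, unsigned long long knownSrcSize)
    {
        /* derive cparams tuned to a known input size, then apply them in one call */
        ZSTD_compressionParameters const cparams = ZSTD_getCParams(19, knownSrcSize, 0 /* no dict */);
        size_t const r = ZSTD_CCtx_setCParams(cctx, cparams);
        /* on failure, no parameter was updated (see contract above) */
        return r;
    }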


+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams);

+ Set all parameters provided within @p fparams into the working @p cctx.
+ @return 0 on success, or an error code (can be checked with ZSTD_isError()).


+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params);

+ Set all parameters provided within @p params into the working @p cctx.
+ @return 0 on success, or an error code (can be checked with ZSTD_isError()).


+ZSTD_DEPRECATED("use ZSTD_compress2")
+ZSTDLIB_STATIC_API
     size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
    -                              void* dst, size_t dstCapacity,
    -                        const void* src, size_t srcSize,
    -                        const void* dict,size_t dictSize,
    -                              ZSTD_parameters params);
    +                  void* dst, size_t dstCapacity,
    +            const void* src, size_t srcSize,
    +            const void* dict,size_t dictSize,
    +                  ZSTD_parameters params);
     

    Note : this function is now DEPRECATED. It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. This prototype will generate compilation warnings.


-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
+ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
+ZSTDLIB_STATIC_API
     size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
@@ -1555,8 +1836,8 @@ Streaming decompression functions



-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
+ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
+ZSTDLIB_STATIC_API
     size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
     

 This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().
 Instruct the decoder context about what kind of data to decode next.
@@ -1582,8 +1863,8 @@ Streaming decompression functions



    Advanced Streaming compression functions


-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
                  int compressionLevel,
                  unsigned long long pledgedSrcSize);
@@ -1600,8 +1881,8 @@ Advanced Streaming compression functions


-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
              const void* dict, size_t dictSize,
                    int compressionLevel);
@@ -1618,18 +1899,15 @@ Advanced Streaming compression functions


-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
             const void* dict, size_t dictSize,
                   ZSTD_parameters params,
                   unsigned long long pledgedSrcSize);
-This function is DEPRECATED, and is approximately equivalent to:
+This function is DEPRECATED, and is equivalent to:
     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
-    // Pseudocode: Set each zstd parameter and leave the rest as-is.
-    for ((param, value) : params) {
-        ZSTD_CCtx_setParameter(zcs, param, value);
-    }
+    ZSTD_CCtx_setParams(zcs, params);
     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
@@ -1640,8 +1918,8 @@ Advanced Streaming compression functions


-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
     

 This function is DEPRECATED, and equivalent to:
     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
     ZSTD_CCtx_refCDict(zcs, cdict);
@@ -1652,18 +1930,15 @@ Advanced Streaming compression functions


-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
     size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
                        const ZSTD_CDict* cdict,
                              ZSTD_frameParameters fParams,
                              unsigned long long pledgedSrcSize);
-This function is DEPRECATED, and is approximately equivalent to:
+This function is DEPRECATED, and is equivalent to:
     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
-    // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
-    for ((fParam, value) : fParams) {
-        ZSTD_CCtx_setParameter(zcs, fParam, value);
-    }
+    ZSTD_CCtx_setFParams(zcs, fParams);
     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
     ZSTD_CCtx_refCDict(zcs, cdict);
@@ -1674,8 +1949,8 @@ Advanced Streaming compression functions


-ZSTDLIB_STATIC_API
-ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
     size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
     

 This function is DEPRECATED, and is equivalent to:
     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
@@ -1685,7 +1960,7 @@ Advanced Streaming compression functions
 0, its value must be correct, as it will be written in header, and controlled at the end.
@@ -1721,50 +1996,97 @@ Advanced Streaming compression functions


    Advanced Streaming decompression functions


-ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
    ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
     

     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
  note: no dictionary will be used if dict == NULL or dictSize < 8
- Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x


-ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
    ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
     

     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
     ZSTD_DCtx_refDDict(zds, ddict);
  note : ddict is referenced, it must outlive decompression session
- Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x


-ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
    ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
    +ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
     

     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
- re-use decompression parameters from previous init; saves dictionary loading
- Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ reuse decompression parameters from previous init; saves dictionary loading


-Buffer-less and synchronous inner streaming functions

    -  This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
    -  But it's also a complex one, with several restrictions, documented below.
    -  Prefer normal streaming API for an easier experience.
+ZSTDLIB_STATIC_API void
    +ZSTD_registerSequenceProducer(
    +  ZSTD_CCtx* cctx,
    +  void* sequenceProducerState,
    +  ZSTD_sequenceProducer_F sequenceProducer
    +);
    +

+ Instruct zstd to use a block-level external sequence producer function.
+
+ The sequenceProducerState must be initialized by the caller, and the caller is
+ responsible for managing its lifetime. This parameter is sticky across
+ compressions. It will remain set until the user explicitly resets compression
+ parameters.
+
+ Sequence producer registration is considered to be an "advanced parameter",
+ part of the "advanced API". This means it will only have an effect on compression
+ APIs which respect advanced parameters, such as compress2() and compressStream2().
+ Older compression APIs such as compressCCtx(), which predate the introduction of
+ "advanced parameters", will ignore any external sequence producer setting.
+
+ The sequence producer can be "cleared" by registering a NULL function pointer. This
+ removes all limitations described above in the "LIMITATIONS" section of the API docs.
+
+ The user is strongly encouraged to read the full API documentation (above) before
+ calling this function.
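A minimal sketch, assuming the ZSTD_sequenceProducer_F callback signature and the ZSTD_SEQUENCE_PRODUCER_ERROR constant from the static-only API: a producer that always reports failure, so zstd falls back to its internal match finder when fallback is enabled.

    /* Do-nothing producer: signals failure for every block. */
    static size_t noopProducer(void* state,
                               ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                               const void* src, size_t srcSize,
                               const void* dict, size_t dictSize,
                               int compressionLevel, size_t windowSize)
    {
        (void)state; (void)outSeqs; (void)outSeqsCapacity; (void)src; (void)srcSize;
        (void)dict; (void)dictSize; (void)compressionLevel; (void)windowSize;
        return ZSTD_SEQUENCE_PRODUCER_ERROR;
    }

    static void register_producer(ZSTD_CCtx* cctx)
    {
        ZSTD_registerSequenceProducer(cctx, NULL /* state */, noopProducer);  /* sticky */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
        /* later: ZSTD_registerSequenceProducer(cctx, NULL, NULL) clears it */
    }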


+ZSTDLIB_STATIC_API void
+ZSTD_CCtxParams_registerSequenceProducer(
+  ZSTD_CCtx_params* params,
+  void* sequenceProducerState,
+  ZSTD_sequenceProducer_F sequenceProducer
+);

    Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params.
    This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(),
    which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().

    If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx()
    is required, then this function is for you. Otherwise, you probably don't need it.

    See tests/zstreamtest.c for example usage.
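
    A hedged sketch of that scenario, reusing the noopSeqProducer placeholder from above
    (error checks elided; the malloc'd workspace is illustrative, any suitably sized arena works):

        ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
        ZSTD_CCtxParams_init(params, 3 /* compression level */);
        ZSTD_CCtxParams_registerSequenceProducer(params, NULL, noopSeqProducer);

        /* the estimate now accounts for the registered sequence producer */
        {   size_t const wkspSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
            void* const wksp = malloc(wkspSize);
            ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
            ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
        }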



    Buffer-less and synchronous inner streaming functions (DEPRECATED)

    +  This API is deprecated, and will be removed in a future version.
    +  It allows streaming (de)compression with user allocated buffers.
    +  However, it is hard to use, and not as well tested as the rest of
    +  our API.
    +
    +  Please use the normal streaming API instead: ZSTD_compressStream2,
    +  and ZSTD_decompressStream.
+  If there is functionality you need that it doesn't provide,
+  please open an issue on our GitHub.
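
    A hedged sketch of the recommended replacement, the normal streaming loop
    (`readInput`/`writeOutput` are hypothetical caller-supplied I/O helpers; error checks elided):

        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        char inBuf[128 * 1024]; char outBuf[128 * 1024];
        size_t readSize;
        while ((readSize = readInput(inBuf, sizeof(inBuf))) > 0) {
            int const lastChunk = (readSize < sizeof(inBuf));
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            size_t remaining;
            do {
                ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
                remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                writeOutput(outBuf, output.pos);
            } while (lastChunk ? (remaining != 0) : (input.pos < input.size));
            if (lastChunk) break;
        }
        ZSTD_freeCCtx(cctx);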
      
     

    Buffer-less streaming compression (synchronous mode)

       A ZSTD_CCtx object is required to track streaming operations.
       Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
    -  ZSTD_CCtx object can be re-used multiple times within successive compression operations.
    +  ZSTD_CCtx object can be reused multiple times within successive compression operations.
     
       Start by initializing a context.
       Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
    -  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
     
       Then, consume your input using ZSTD_compressContinue().
       There are some important considerations to keep in mind when using this advanced function :
    @@ -1782,30 +2104,34 @@ 

    It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
    Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
-   `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+   `ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again.
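
    A hedged sketch of the begin/continue/end call sequence (`dst`, `chunk1`, `chunk2` and
    their sizes are assumed set up by the caller as unsigned char buffers; every return
    value should be checked with ZSTD_isError(), omitted here):

        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t pos = 0;
        ZSTD_compressBegin(cctx, 3 /* compression level */);
        /* consume input; each call appends one or more blocks into dst */
        pos += ZSTD_compressContinue(cctx, dst + pos, dstCapacity - pos, chunk1, chunk1Size);
        pos += ZSTD_compressContinue(cctx, dst + pos, dstCapacity - pos, chunk2, chunk2Size);
        /* final call: writes the last block (srcSize==0 is valid) and ends the frame */
        pos += ZSTD_compressEnd(cctx, dst + pos, dstCapacity - pos, NULL, 0);
        ZSTD_freeCCtx(cctx);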

    Buffer-less streaming compression functions

-ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
-ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
     

+size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */

    size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
     

    Buffer-less streaming decompression (synchronous mode)

       A ZSTD_DCtx object is required to track streaming operations.
       Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
    -  A ZSTD_DCtx object can be re-used multiple times.
    +  A ZSTD_DCtx object can be reused multiple times.
     
       First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
       Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
       Data fragment must be large enough to ensure successful decoding.
      `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
    -  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
    -           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
    +  result  : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
    +           >0 : `srcSize` is too small, please provide at least result bytes on next attempt.
                errorCode, which can be tested using ZSTD_isError().
     
    -  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
    +  It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame,
       such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
       Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
       As a consequence, check that values remain within valid application range.
    @@ -1821,7 +2147,7 @@ 

     
       The most memory efficient way is to use a round buffer of sufficient size.
       Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
    -  which can @return an error code if required value is too large for current system (in 32-bits mode).
    +  which can return an error code if required value is too large for current system (in 32-bits mode).
       In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
       up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
       which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
    @@ -1841,7 +2167,7 @@ 

       ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
       ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
     
    - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
    +  result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
       It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
       It can also be an error code, which can be tested with ZSTD_isError().
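
    A hedged sketch of the corresponding decode loop for one in-memory frame (`src`,
    `srcSize`, and the output buffer `dst` are assumed set up by the caller; a real
    round-buffer implementation would wrap `op`, and all error checks are elided):

        ZSTD_frameHeader zfh;
        ZSTD_getFrameHeader(&zfh, src, srcSize);   /* 0 == header fully decoded */
        {   size_t const dstSize = ZSTD_decodingBufferSize_min(zfh.windowSize, zfh.frameContentSize);
            ZSTD_DCtx* const dctx = ZSTD_createDCtx();
            const unsigned char* ip = (const unsigned char*)src;
            unsigned char* op = (unsigned char*)dst;   /* blocks must be decoded contiguously */
            size_t toRead;
            ZSTD_decompressBegin(dctx);
            while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
                size_t const decoded = ZSTD_decompressContinue(dctx,
                                            op, dstSize - (size_t)(op - (unsigned char*)dst),
                                            ip, toRead);
                ip += toRead;     /* the _exact_ requested amount must be provided */
                op += decoded;    /* decoded may be 0 when only a metadata item was consumed */
            }
            ZSTD_freeDCtx(dctx);
        }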
     
    @@ -1863,34 +2189,25 @@ 

       For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
     
-
    Buffer-less streaming decompression functions

typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
-typedef struct {
-    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
-    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
-    unsigned blockSizeMax;
-    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
-    unsigned headerSize;
-    unsigned dictID;
-    unsigned checksumFlag;
-} ZSTD_frameHeader;
-
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
-/*! ZSTD_getFrameHeader_advanced() :
- *  same as ZSTD_getFrameHeader(),
- *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
-ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
-
-    decode Frame Header, or requires larger `srcSize`.
-    @return : 0, `zfhPtr` is correctly filled,
-             >0, `srcSize` is too small, value is wanted `srcSize` amount,
-             or an error code, which can be tested using ZSTD_isError()



    Buffer-less streaming decompression functions

+ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */

    typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
     

-
-    Block level API
+
+    Block level API (DEPRECATED)

+    You can get the frame header down to 2 bytes by setting:
+      - ZSTD_c_format = ZSTD_f_zstd1_magicless
+      - ZSTD_c_contentSizeFlag = 0
+      - ZSTD_c_checksumFlag = 0
+      - ZSTD_c_dictIDFlag = 0
+
+    This API is not as well tested as our normal API, so we recommend not using it.
+    We will be removing it in a future version. If the normal API doesn't provide
+    the functionality you need, please open a GitHub issue.
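
    A hedged sketch of that minimal-header configuration (`cctx` and `dctx` are assumed
    already created; return codes unchecked here):

        ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, 0);
        /* the decoder must be configured for the magicless format as well */
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);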

-   Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+   Block functions produce and decode raw zstd blocks, without frame metadata.
+   Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
    But users will have to manage the metadata needed to regenerate the data, such as compressed and content sizes.

    A few rules to respect :

@@ -1899,7 +2216,6 @@
         - It is necessary to init context before starting
           + compression : any ZSTD_compressBegin*() variant, including with dictionary
           + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
    -      + copyCCtx() and copyDCtx() can be used too
         - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
           + If input is larger than a block size, it's necessary to split input data into multiple blocks
           + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
    @@ -1915,9 +2231,13 @@ 

             Use ZSTD_insertBlock() for such a case.
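
    A hedged sketch of a raw block round trip illustrating these rules (the caller must
    record `cSize` itself, since raw blocks carry no metadata; `src`, `dst`, `out` and
    capacities are assumed set up by the caller, checks elided):

        ZSTD_compressBegin(cctx, 3 /* level */);
        {   size_t const blockMax = ZSTD_getBlockSize(cctx);   /* srcSize must be <= blockMax */
            size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
            ZSTD_decompressBegin(dctx);
            if (cSize != 0) {
                ZSTD_decompressBlock(dctx, out, outCapacity, dst, cSize);
            } else {
                /* block was not compressible: store it raw,
                 * and replay it into the decoder's history */
                memcpy(out, src, srcSize);
                ZSTD_insertBlock(dctx, out, srcSize);
            }
            (void)blockMax;
        }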
     



    Raw zstd block functions

-ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
     

    diff --git a/examples/Makefile b/examples/Makefile index 8d7361dd867..31f52d35763 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/examples/common.h b/examples/common.h index 9b09030eb69..4873e877a7a 100644 --- a/examples/common.h +++ b/examples/common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/dictionary_compression.c b/examples/dictionary_compression.c index 7c7d3428852..83edc1cad9b 100644 --- a/examples/dictionary_compression.c +++ b/examples/dictionary_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/dictionary_decompression.c b/examples/dictionary_decompression.c index 107cfc1ee1a..e6c999964a2 100644 --- a/examples/dictionary_decompression.c +++ b/examples/dictionary_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/multiple_simple_compression.c b/examples/multiple_simple_compression.c index 5d2a28fcdca..bf77ca13317 100644 --- a/examples/multiple_simple_compression.c +++ b/examples/multiple_simple_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/multiple_streaming_compression.c b/examples/multiple_streaming_compression.c index d4efc8e5773..b12ad03dce1 100644 --- a/examples/multiple_streaming_compression.c +++ b/examples/multiple_streaming_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/simple_compression.c b/examples/simple_compression.c index 27a65b17f50..7c880725fc7 100644 --- a/examples/simple_compression.c +++ b/examples/simple_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/simple_decompression.c b/examples/simple_decompression.c index 59c1fd414aa..f499156f64e 100644 --- a/examples/simple_decompression.c +++ b/examples/simple_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/streaming_compression.c b/examples/streaming_compression.c index ff1875829ea..063aa82a294 100644 --- a/examples/streaming_compression.c +++ b/examples/streaming_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -42,7 +42,13 @@ static void compressFile_orDie(const char* fname, const char* outName, int cLeve */ CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, cLevel) ); CHECK_ZSTD( ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1) ); - ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbThreads); + if (nbThreads > 1) { + size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbThreads); + if (ZSTD_isError(r)) { + fprintf (stderr, "Note: the linked libzstd library doesn't support multithreading. " + "Reverting to single-thread mode. \n"); + } + } /* This loop read from the input file, compresses that entire chunk, * and writes all output produced to the output file. @@ -117,7 +123,7 @@ int main(int argc, const char** argv) } int cLevel = 1; - int nbThreads = 4; + int nbThreads = 1; if (argc >= 3) { cLevel = atoi (argv[2]); diff --git a/examples/streaming_compression_thread_pool.c b/examples/streaming_compression_thread_pool.c index 21cb3d54999..a1a024129f2 100644 --- a/examples/streaming_compression_thread_pool.c +++ b/examples/streaming_compression_thread_pool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Martin Liska, SUSE, Facebook, Inc. + * Copyright (c) Martin Liska, SUSE, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/streaming_decompression.c b/examples/streaming_decompression.c index 6dc4c22677b..95fa1122773 100644 --- a/examples/streaming_decompression.c +++ b/examples/streaming_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/examples/streaming_memory_usage.c b/examples/streaming_memory_usage.c index a5219ef1e47..957acb61a39 100644 --- a/examples/streaming_memory_usage.c +++ b/examples/streaming_memory_usage.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/BUCK b/lib/BUCK index 60c6bbb54d9..8c555a8be18 100644 --- a/lib/BUCK +++ b/lib/BUCK @@ -57,7 +57,7 @@ cxx_library( srcs=glob(['legacy/*.c']), deps=[':common'], exported_preprocessor_flags=[ - '-DZSTD_LEGACY_SUPPORT=4', + '-DZSTD_LEGACY_SUPPORT=0', ], ) diff --git a/lib/Makefile b/lib/Makefile index 74c6f1baf61..569bd6090c9 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -8,6 +8,9 @@ # You may select, at your option, one of the above-listed licenses. 
# ################################################################ +# default target (when running `make` with no argument) +lib-release: + # Modules ZSTD_LIB_COMPRESSION ?= 1 ZSTD_LIB_DECOMPRESSION ?= 1 @@ -54,13 +57,14 @@ VERSION := $(ZSTD_VERSION) # Note: by default, the static library is built single-threaded and dynamic library is built # multi-threaded. It is possible to force multi or single threaded builds by appending # -mt or -nomt to the build target (like lib-mt for multi-threaded, lib-nomt for single-threaded). -.PHONY: default -default: lib-release + CPPFLAGS_DYNLIB += -DZSTD_MULTITHREAD # dynamic library build defaults to multi-threaded LDFLAGS_DYNLIB += -pthread -CPPFLAGS_STATLIB += # static library build defaults to single-threaded +CPPFLAGS_STATICLIB += # static library build defaults to single-threaded +# pkg-config Libs.private points to LDFLAGS_DYNLIB +PCLIB := $(LDFLAGS_DYNLIB) ifeq ($(findstring GCC,$(CCVER)),GCC) decompress/zstd_decompress_block.o : CFLAGS+=-fno-tree-vectorize @@ -69,13 +73,15 @@ endif # macOS linker doesn't support -soname, and use different extension # see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html -ifeq ($(UNAME), Darwin) +UNAME_TARGET_SYSTEM ?= $(UNAME) + +ifeq ($(UNAME_TARGET_SYSTEM), Darwin) SHARED_EXT = dylib SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT) SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT) SONAME_FLAGS = -install_name $(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER) else - ifeq ($(UNAME), AIX) + ifeq ($(UNAME_TARGET_SYSTEM), AIX) SONAME_FLAGS = else SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR) @@ -91,7 +97,7 @@ all: lib .PHONY: libzstd.a # must be run every time -libzstd.a: CPPFLAGS += $(CPPFLAGS_STATLIB) +libzstd.a: CPPFLAGS += $(CPPFLAGS_STATICLIB) SET_CACHE_DIRECTORY = \ +$(MAKE) --no-print-directory $@ \ @@ -109,20 +115,20 @@ libzstd.a: else # BUILD_DIR is defined -ZSTD_STATLIB_DIR := $(BUILD_DIR)/static -ZSTD_STATLIB := $(ZSTD_STATLIB_DIR)/libzstd.a -ZSTD_STATLIB_OBJ := $(addprefix $(ZSTD_STATLIB_DIR)/,$(ZSTD_LOCAL_OBJ)) -$(ZSTD_STATLIB): ARFLAGS = rcs -$(ZSTD_STATLIB): | $(ZSTD_STATLIB_DIR) -$(ZSTD_STATLIB): $(ZSTD_STATLIB_OBJ) +ZSTD_STATICLIB_DIR := $(BUILD_DIR)/static +ZSTD_STATICLIB := $(ZSTD_STATICLIB_DIR)/libzstd.a +ZSTD_STATICLIB_OBJ := $(addprefix $(ZSTD_STATICLIB_DIR)/,$(ZSTD_LOCAL_OBJ)) +$(ZSTD_STATICLIB): ARFLAGS = rcs +$(ZSTD_STATICLIB): | $(ZSTD_STATICLIB_DIR) +$(ZSTD_STATICLIB): $(ZSTD_STATICLIB_OBJ) # Check for multithread flag at target execution time $(if $(filter -DZSTD_MULTITHREAD,$(CPPFLAGS)),\ @echo compiling multi-threaded static library $(LIBVER),\ @echo compiling single-threaded static library $(LIBVER)) $(AR) $(ARFLAGS) $@ $^ -libzstd.a: $(ZSTD_STATLIB) - cp -f $< $@ +libzstd.a: $(ZSTD_STATICLIB) + $(CP) $< $@ endif @@ -160,13 +166,13 @@ $(ZSTD_DYNLIB): $(ZSTD_DYNLIB_OBJ) $(if $(filter -DZSTD_MULTITHREAD,$(CPPFLAGS)),\ @echo compiling multi-threaded dynamic library $(LIBVER),\ @echo compiling single-threaded dynamic library $(LIBVER)) - $(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@ + $(CC) $(FLAGS) $^ $(SONAME_FLAGS) -o $@ @echo creating versioned links - ln -sf $@ libzstd.$(SHARED_EXT_MAJOR) - ln -sf $@ libzstd.$(SHARED_EXT) + $(LN) -sf $@ libzstd.$(SHARED_EXT_MAJOR) + $(LN) -sf $@ libzstd.$(SHARED_EXT) $(LIBZSTD): $(ZSTD_DYNLIB) - cp -f $< $@ + $(CP) $< $@ endif # ifndef BUILD_DIR endif # if windows @@ -182,14 +188,17 @@ 
lib : libzstd.a libzstd # make does not consider implicit pattern rule for .PHONY target %-mt : CPPFLAGS_DYNLIB := -DZSTD_MULTITHREAD -%-mt : CPPFLAGS_STATLIB := -DZSTD_MULTITHREAD +%-mt : CPPFLAGS_STATICLIB := -DZSTD_MULTITHREAD %-mt : LDFLAGS_DYNLIB := -pthread +%-mt : PCLIB := +%-mt : PCMTLIB := $(LDFLAGS_DYNLIB) %-mt : % @echo multi-threaded build completed %-nomt : CPPFLAGS_DYNLIB := %-nomt : LDFLAGS_DYNLIB := -%-nomt : CPPFLAGS_STATLIB := +%-nomt : CPPFLAGS_STATICLIB := +%-nomt : PCLIB := %-nomt : % @echo single-threaded build completed @@ -200,43 +209,53 @@ lib : libzstd.a libzstd # Generate .h dependencies automatically -DEPFLAGS = -MT $@ -MMD -MP -MF +# -MMD: compiler generates dependency information as a side-effect of compilation, without system headers +# -MP: adds phony target for each dependency other than main file. +DEPFLAGS = -MMD -MP -$(ZSTD_DYNLIB_DIR)/%.o : %.c $(ZSTD_DYNLIB_DIR)/%.d | $(ZSTD_DYNLIB_DIR) +# ensure that ZSTD_DYNLIB_DIR exists prior to generating %.o +$(ZSTD_DYNLIB_DIR)/%.o : %.c | $(ZSTD_DYNLIB_DIR) @echo CC $@ - $(COMPILE.c) $(DEPFLAGS) $(ZSTD_DYNLIB_DIR)/$*.d $(OUTPUT_OPTION) $< + $(COMPILE.c) $(DEPFLAGS) $(OUTPUT_OPTION) $< -$(ZSTD_STATLIB_DIR)/%.o : %.c $(ZSTD_STATLIB_DIR)/%.d | $(ZSTD_STATLIB_DIR) +$(ZSTD_STATICLIB_DIR)/%.o : %.c | $(ZSTD_STATICLIB_DIR) @echo CC $@ - $(COMPILE.c) $(DEPFLAGS) $(ZSTD_STATLIB_DIR)/$*.d $(OUTPUT_OPTION) $< + $(COMPILE.c) $(DEPFLAGS) $(OUTPUT_OPTION) $< $(ZSTD_DYNLIB_DIR)/%.o : %.S | $(ZSTD_DYNLIB_DIR) @echo AS $@ $(COMPILE.S) $(OUTPUT_OPTION) $< -$(ZSTD_STATLIB_DIR)/%.o : %.S | $(ZSTD_STATLIB_DIR) +$(ZSTD_STATICLIB_DIR)/%.o : %.S | $(ZSTD_STATICLIB_DIR) @echo AS $@ $(COMPILE.S) $(OUTPUT_OPTION) $< -MKDIR ?= mkdir -$(BUILD_DIR) $(ZSTD_DYNLIB_DIR) $(ZSTD_STATLIB_DIR): - $(MKDIR) -p $@ +MKDIR ?= mkdir -p +$(BUILD_DIR) $(ZSTD_DYNLIB_DIR) $(ZSTD_STATICLIB_DIR): + $(MKDIR) $@ -DEPFILES := $(ZSTD_DYNLIB_OBJ:.o=.d) $(ZSTD_STATLIB_OBJ:.o=.d) +DEPFILES := $(ZSTD_DYNLIB_OBJ:.o=.d) $(ZSTD_STATICLIB_OBJ:.o=.d) $(DEPFILES): -include $(wildcard $(DEPFILES)) +# The leading '-' means: do not fail is include fails (ex: directory does not exist yet) +-include $(wildcard $(DEPFILES)) -# Special case : building library in single-thread mode _and_ without zstdmt_compress.c -ZSTDMT_FILES = compress/zstdmt_compress.c -ZSTD_NOMT_FILES = $(filter-out $(ZSTDMT_FILES),$(ZSTD_FILES)) +# Special case : build library in single-thread mode _and_ without zstdmt_compress.c +# Note : we still need threading.c and pool.c for the dictionary builder, +# but they will correctly behave single-threaded. 
+ZSTDMT_FILES = zstdmt_compress.c +ZSTD_NOMT_FILES = $(filter-out $(ZSTDMT_FILES),$(notdir $(ZSTD_FILES))) libzstd-nomt: CFLAGS += -fPIC -fvisibility=hidden libzstd-nomt: LDFLAGS += -shared libzstd-nomt: $(ZSTD_NOMT_FILES) @echo compiling single-thread dynamic library $(LIBVER) @echo files : $(ZSTD_NOMT_FILES) - $(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@ + @if echo "$(ZSTD_NOMT_FILES)" | tr ' ' '\n' | $(GREP) -q zstdmt; then \ + echo "Error: Found zstdmt in list."; \ + exit 1; \ + fi + $(CC) $(FLAGS) $^ $(SONAME_FLAGS) -o $@ .PHONY: clean clean: @@ -249,7 +268,7 @@ clean: #----------------------------------------------------------------------------- # make install is validated only for below listed environments #----------------------------------------------------------------------------- -ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX)) +ifneq (,$(filter $(INSTALL_OS_LIST),$(UNAME))) lib: libzstd.pc @@ -280,13 +299,21 @@ PCLIBPREFIX := $(if $(findstring $(LIBDIR),$(PCLIBDIR)),,$${exec_prefix}) # to PREFIX, rather than as a resolved value. PCEXEC_PREFIX := $(if $(HAS_EXPLICIT_EXEC_PREFIX),$(EXEC_PREFIX),$${prefix}) -ifneq (,$(filter $(UNAME),FreeBSD NetBSD DragonFly)) + +ifneq ($(MT),) + PCLIB := + PCMTLIB := $(LDFLAGS_DYNLIB) +else + PCLIB := $(LDFLAGS_DYNLIB) +endif + +ifneq (,$(filter FreeBSD NetBSD DragonFly,$(UNAME))) PKGCONFIGDIR ?= $(PREFIX)/libdata/pkgconfig else PKGCONFIGDIR ?= $(LIBDIR)/pkgconfig endif -ifneq (,$(filter $(UNAME),SunOS)) +ifneq (,$(filter SunOS,$(UNAME))) INSTALL ?= ginstall else INSTALL ?= install @@ -296,6 +323,10 @@ INSTALL_PROGRAM ?= $(INSTALL) INSTALL_DATA ?= $(INSTALL) -m 644 +# pkg-config library define. +# For static single-threaded library declare -pthread in Libs.private +# For static multi-threaded library declare -pthread in Libs and Cflags +.PHONY: libzstd.pc libzstd.pc: libzstd.pc.in @echo creating pkgconfig @sed \ @@ -304,7 +335,8 @@ libzstd.pc: libzstd.pc.in -e 's|@INCLUDEDIR@|$(PCINCPREFIX)$(PCINCDIR)|' \ -e 's|@LIBDIR@|$(PCLIBPREFIX)$(PCLIBDIR)|' \ -e 's|@VERSION@|$(VERSION)|' \ - -e 's|@LIBS_PRIVATE@|$(LDFLAGS_DYNLIB)|' \ + -e 's|@LIBS_MT@|$(PCMTLIB)|' \ + -e 's|@LIBS_PRIVATE@|$(PCLIB)|' \ $< >$@ .PHONY: install @@ -331,8 +363,8 @@ install-shared: [ -e $(DESTDIR)$(LIBDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/ @echo Installing shared library $(INSTALL_PROGRAM) $(LIBZSTD) $(DESTDIR)$(LIBDIR) - ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) - ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) + $(LN) -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) + $(LN) -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) .PHONY: install-includes install-includes: diff --git a/lib/README.md b/lib/README.md index 015cde567e5..aa92bd659da 100644 --- a/lib/README.md +++ b/lib/README.md @@ -7,15 +7,30 @@ in order to make it easier to select or exclude features. #### Building -`Makefile` script is provided, supporting [Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html#Makefile-Conventions), +A `Makefile` script is provided, supporting [Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html#Makefile-Conventions), including commands variables, staged install, directory variables and standard targets. 
- `make` : generates both static and dynamic libraries -- `make install` : install libraries and headers in target system directories +- `make install` : install libraries, headers and pkg-config in local system directories -`libzstd` default scope is pretty large, including compression, decompression, dictionary builder, -and support for decoding legacy formats >= v0.5.0. +`libzstd` default scope includes compression, decompression, and dictionary builder. +Note: starting v1.6.0, support for decoding legacy formats is disabled by default. +See _modular build_ below to learn how to enable it. The scope can be reduced on demand (see paragraph _modular build_). +#### Multiarch Support + +For multiarch systems (like Debian/Ubuntu), libraries should be installed to architecture-specific directories. +When creating packages for such systems, use the `LIBDIR` variable to specify the correct multiarch path: + +```bash +# For x86_64 systems on Ubuntu/Debian: +make install PREFIX=/usr LIBDIR=/usr/lib/x86_64-linux-gnu + +# For ARM64 systems on Ubuntu/Debian: +make install PREFIX=/usr LIBDIR=/usr/lib/aarch64-linux-gnu +``` + +This will not only install the files in the correct directories, but also generate the correct paths for `pkg-config`. #### Multithreading support @@ -27,12 +42,16 @@ Enabling multithreading requires 2 conditions : For convenience, we provide a build target to generate multi and single threaded libraries: - Force enable multithreading on both dynamic and static libraries by appending `-mt` to the target, e.g. `make lib-mt`. + Note that the `.pc` generated on calling `make lib-mt` will already include the require Libs and Cflags. - Force disable multithreading on both dynamic and static libraries by appending `-nomt` to the target, e.g. `make lib-nomt`. - By default, as mentioned before, dynamic library is multithreaded, and static library is single-threaded, e.g. `make lib`. When linking a POSIX program with a multithreaded version of `libzstd`, note that it's necessary to invoke the `-pthread` flag during link stage. +The `.pc` generated from `make install` or `make install-pc` always assume a single-threaded static library +is compiled. To correctly generate a `.pc` for the multi-threaded static library, set `MT=1` as ENV variable. + Multithreading capabilities are exposed via the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.4.3/lib/zstd.h#L351). @@ -81,14 +100,14 @@ The file structure is designed to make this selection manually achievable for an Specifying a number limits versions supported to that version onward. For example, `ZSTD_LEGACY_SUPPORT=2` means : "support legacy formats >= v0.2.0". Conversely, `ZSTD_LEGACY_SUPPORT=0` means "do __not__ support legacy formats". - By default, this build macro is set as `ZSTD_LEGACY_SUPPORT=5`. + By default, this build macro is set as `ZSTD_LEGACY_SUPPORT=0` (disabled). Decoding supported legacy format is a transparent capability triggered within decompression functions. It's also allowed to invoke legacy API directly, exposed in `lib/legacy/zstd_legacy.h`. Each version does also provide its own set of advanced API. For example, advanced API for version `v0.4` is exposed in `lib/legacy/zstd_v04.h` . - While invoking `make libzstd`, it's possible to define build macros - `ZSTD_LIB_COMPRESSION, ZSTD_LIB_DECOMPRESSION`, `ZSTD_LIB_DICTBUILDER`, + `ZSTD_LIB_COMPRESSION`, `ZSTD_LIB_DECOMPRESSION`, `ZSTD_LIB_DICTBUILDER`, and `ZSTD_LIB_DEPRECATED` as `0` to forgo compilation of the corresponding features. 
This will also disable compilation of all dependencies (e.g. `ZSTD_LIB_COMPRESSION=0` will also disable @@ -119,6 +138,15 @@ The file structure is designed to make this selection manually achievable for an binary is achieved by using `HUF_FORCE_DECOMPRESS_X1` and `ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT` (implied by `ZSTD_LIB_MINIFY`). + On the compressor side, Zstd's compression levels map to several internal + strategies. In environments where the higher compression levels aren't used, + it is possible to exclude all but the fastest strategy with + `ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP=1`. (Note that this will change + the behavior of the default compression level.) Or if you want to retain the + default compressor as well, you can set + `ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP=1`, at the cost of an additional + ~20KB or so. + For squeezing the last ounce of size out, you can also define `ZSTD_NO_INLINE`, which disables inlining, and `ZSTD_STRIP_ERROR_STRINGS`, which removes the error messages that are otherwise returned by @@ -136,6 +164,13 @@ The file structure is designed to make this selection manually achievable for an will expose the deprecated `ZSTDMT` API exposed by `zstdmt_compress.h` in the shared library, which is now hidden by default. +- The build macro `STATIC_BMI2` can be set to 1 to force usage of `bmi2` instructions. + It is generally not necessary to set this build macro, + because `STATIC_BMI2` will be automatically set to 1 + on detecting the presence of the corresponding instruction set in the compilation target. + It's nonetheless available as an optional manual toggle for better control, + and can also be used to forcefully disable `bmi2` instructions by setting it to 0. + - The build macro `DYNAMIC_BMI2` can be set to 1 or 0 in order to generate binaries which can detect at runtime the presence of BMI2 instructions, and use them only if present. These instructions contribute to better performance, notably on the decoder side. @@ -161,6 +196,22 @@ The file structure is designed to make this selection manually achievable for an `ZSTD_DCtx` decompression contexts, but might also result in a small decompression speed cost. +- The C compiler macros `ZSTDLIB_VISIBLE`, `ZSTDERRORLIB_VISIBLE` and `ZDICTLIB_VISIBLE` + can be overridden to control the visibility of zstd's API. Additionally, + `ZSTDLIB_STATIC_API` and `ZDICTLIB_STATIC_API` can be overridden to control the visibility + of zstd's static API. Specifically, it can be set to `ZSTDLIB_HIDDEN` to hide the symbols + from the shared library. These macros default to `ZSTDLIB_VISIBILITY`, + `ZSTDERRORLIB_VSIBILITY`, and `ZDICTLIB_VISIBILITY` if unset, for backwards compatibility + with the old macro names. + +- The C compiler macro `HUF_DISABLE_FAST_DECODE` disables the newer Huffman fast C + and assembly decoding loops. You may want to use this macro if these loops are + slower on your platform. + +- The macro `ZDICT_QSORT` can enforce selection of a specific sorting variant, + which is useful when autodetection fails, for example with older versions of `musl`. + For this scenario, it can be set as `ZDICT_QSORT=ZDICT_QSORT_C90`. + Other selectable suffixes are `_GNU`, `_APPLE`, `_MSVC` and `_C11`. #### Windows : using MinGW+MSYS to create DLL diff --git a/lib/common/allocations.h b/lib/common/allocations.h new file mode 100644 index 00000000000..d4d392998f7 --- /dev/null +++ b/lib/common/allocations.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* This file provides custom allocation primitives + */ + +#define ZSTD_DEPS_NEED_MALLOC +#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */ + +#include "compiler.h" /* MEM_STATIC */ +#define ZSTD_STATIC_LINKING_ONLY +#include "../zstd.h" /* ZSTD_customMem */ + +#ifndef ZSTD_ALLOCATIONS_H +#define ZSTD_ALLOCATIONS_H + +/* custom memory allocation functions */ + +MEM_STATIC void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) + return customMem.customAlloc(customMem.opaque, size); + return ZSTD_malloc(size); +} + +MEM_STATIC void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) { + /* calloc implemented as malloc+memset */ + void* const ptr = customMem.customAlloc(customMem.opaque, size); + + if (ptr == NULL) { + return NULL; + } + + ZSTD_memset(ptr, 0, size); + return ptr; + } + return ZSTD_calloc(1, size); +} + +MEM_STATIC void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) +{ + if (ptr!=NULL) { + if (customMem.customFree) + customMem.customFree(customMem.opaque, ptr); + else + ZSTD_free(ptr); + } +} + +#endif /* ZSTD_ALLOCATIONS_H */ diff --git a/lib/common/bits.h b/lib/common/bits.h index c0e9177507f..910ffa38812 100644 --- a/lib/common/bits.h +++ b/lib/common/bits.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -17,38 +17,40 @@ MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val) { assert(val != 0); { - static const int DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3, + static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9}; - return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >> 27]; + return DeBruijnBytePos[((U32) ((val & (0-val)) * 0x077CB531U)) >> 27]; } } MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val) { assert(val != 0); -# if defined(_MSC_VER) -# if STATIC_BMI2 == 1 - return _tzcnt_u32(val); -# else - if (val != 0) { - unsigned long r; - _BitScanForward(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (unsigned)__builtin_ctz(val); -# else - return ZSTD_countTrailingZeros32_fallback(val); -# endif +#if defined(_MSC_VER) +# if STATIC_BMI2 + return (unsigned)_tzcnt_u32(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward(&r, val); + return (unsigned)r; + } else { + __assume(0); /* Should not reach this code path */ + } +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)__builtin_ctz(val); +#elif defined(__ICCARM__) + return (unsigned)__builtin_ctz(val); +#else + return ZSTD_countTrailingZeros32_fallback(val); +#endif } -MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) { +MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) +{ assert(val != 0); { static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29, @@ -67,86 +69,89 @@ MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) 
{ MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val) { assert(val != 0); -# if defined(_MSC_VER) -# if STATIC_BMI2 == 1 - return _lzcnt_u32(val); -# else - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, val); - return (unsigned)(31 - r); - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (unsigned)__builtin_clz(val); -# else - return ZSTD_countLeadingZeros32_fallback(val); -# endif +#if defined(_MSC_VER) +# if STATIC_BMI2 + return (unsigned)_lzcnt_u32(val); +# else + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)(31 - r); + } else { + __assume(0); /* Should not reach this code path */ + } +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)__builtin_clz(val); +#elif defined(__ICCARM__) + return (unsigned)__builtin_clz(val); +#else + return ZSTD_countLeadingZeros32_fallback(val); +#endif } MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val) { assert(val != 0); -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 == 1 - return _tzcnt_u64(val); -# else - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__) - return (unsigned)__builtin_ctzll(val); -# else - { - U32 mostSignificantWord = (U32)(val >> 32); - U32 leastSignificantWord = (U32)val; - if (leastSignificantWord == 0) { - return 32 + ZSTD_countTrailingZeros32(mostSignificantWord); - } else { - return ZSTD_countTrailingZeros32(leastSignificantWord); - } +#if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 + return (unsigned)_tzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, val); + return (unsigned)r; + } else { + __assume(0); /* Should not reach this code path */ + } +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__) + return (unsigned)__builtin_ctzll(val); +#elif defined(__ICCARM__) + return (unsigned)__builtin_ctzll(val); +#else + { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (leastSignificantWord == 0) { + return 32 + ZSTD_countTrailingZeros32(mostSignificantWord); + } else { + return ZSTD_countTrailingZeros32(leastSignificantWord); } -# endif + } +#endif } MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val) { assert(val != 0); -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 == 1 - return _lzcnt_u64(val); -# else - if (val != 0) { - unsigned long r; - _BitScanReverse64(&r, val); - return (unsigned)(63 - r); - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (unsigned)(__builtin_clzll(val)); -# else - { - U32 mostSignificantWord = (U32)(val >> 32); - U32 leastSignificantWord = (U32)val; - if (mostSignificantWord == 0) { - return 32 + ZSTD_countLeadingZeros32(leastSignificantWord); - } else { - return ZSTD_countLeadingZeros32(mostSignificantWord); - } +#if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 + return (unsigned)_lzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanReverse64(&r, val); + return (unsigned)(63 - r); + } else { + __assume(0); /* Should not reach this code path */ + } +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)(__builtin_clzll(val)); +#elif defined(__ICCARM__) + return (unsigned)(__builtin_clzll(val)); +#else + { + U32 
mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (mostSignificantWord == 0) { + return 32 + ZSTD_countLeadingZeros32(leastSignificantWord); + } else { + return ZSTD_countLeadingZeros32(mostSignificantWord); } -# endif + } +#endif } MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val) @@ -172,4 +177,29 @@ MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCo return 31 - ZSTD_countLeadingZeros32(val); } +/* ZSTD_rotateRight_*(): + * Rotates a bitfield to the right by "count" bits. + * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts + */ +MEM_STATIC +U64 ZSTD_rotateRight_U64(U64 const value, U32 count) { + assert(count < 64); + count &= 0x3F; /* for fickle pattern recognition */ + return (value >> count) | (U64)(value << ((0U - count) & 0x3F)); +} + +MEM_STATIC +U32 ZSTD_rotateRight_U32(U32 const value, U32 count) { + assert(count < 32); + count &= 0x1F; /* for fickle pattern recognition */ + return (value >> count) | (U32)(value << ((0U - count) & 0x1F)); +} + +MEM_STATIC +U16 ZSTD_rotateRight_U16(U16 const value, U32 count) { + assert(count < 16); + count &= 0x0F; /* for fickle pattern recognition */ + return (value >> count) | (U16)(value << ((0U - count) & 0x0F)); +} + #endif /* ZSTD_BITS_H */ diff --git a/lib/common/bitstream.h b/lib/common/bitstream.h index 84177862645..6ff482a8399 100644 --- a/lib/common/bitstream.h +++ b/lib/common/bitstream.h @@ -1,7 +1,7 @@ /* ****************************************************************** * bitstream * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -14,9 +14,6 @@ #ifndef BITSTREAM_H_MODULE #define BITSTREAM_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif /* * This API consists of small unitary functions, which must be inlined for best performance. * Since link-time-optimization is not available for all compilers, @@ -32,7 +29,6 @@ extern "C" { #include "error_private.h" /* error codes and messages */ #include "bits.h" /* ZSTD_highbit32 */ - /*========================================= * Target specific =========================================*/ @@ -52,12 +48,13 @@ extern "C" { /*-****************************************** * bitStream encoding API (write forward) ********************************************/ +typedef size_t BitContainerType; /* bitStream can mix input from multiple sources. * A critical property of these streams is that they encode and decode in **reverse** direction. * So the first bit sequence you add will be the last to be read, like a LIFO stack. */ typedef struct { - size_t bitContainer; + BitContainerType bitContainer; unsigned bitPos; char* startPtr; char* ptr; @@ -65,7 +62,7 @@ typedef struct { } BIT_CStream_t; MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); -MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits); MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); @@ -74,7 +71,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. * * bits are first added to a local register. 
-* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. +* Local register is BitContainerType, 64-bits on 64-bits systems, or 32-bits on 32-bits systems. * Writing data into memory is an explicit operation, performed by the flushBits function. * Hence keep track how many bits are potentially stored into local register to avoid register overflow. * After a flushBits, a maximum of 7 bits might still be stored into local register. @@ -91,28 +88,28 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); * bitStream decoding API (read backward) **********************************************/ typedef struct { - size_t bitContainer; + BitContainerType bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; const char* limitPtr; } BIT_DStream_t; -typedef enum { BIT_DStream_unfinished = 0, - BIT_DStream_endOfBuffer = 1, - BIT_DStream_completed = 2, - BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ - /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ +typedef enum { BIT_DStream_unfinished = 0, /* fully refilled */ + BIT_DStream_endOfBuffer = 1, /* still some bits left in bitstream */ + BIT_DStream_completed = 2, /* bitstream entirely consumed, bit-exact */ + BIT_DStream_overflow = 3 /* user requested more bits than present in bitstream */ + } BIT_DStream_status; /* result of BIT_reloadDStream() */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); -MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); -MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); +FORCE_INLINE_TEMPLATE BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); +FORCE_INLINE_TEMPLATE BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /* Start by invoking BIT_initDStream(). * A chunk of the bitStream is then stored into a local register. -* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). +* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (BitContainerType). * You can then retrieve bitFields stored into the local register, **in reverse order**. * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. * A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. 
@@ -124,7 +121,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /*-**************************************** * unsafe API ******************************************/ -MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits); /* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); @@ -162,10 +159,15 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, return 0; } -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) +FORCE_INLINE_TEMPLATE BitContainerType BIT_getLowerBits(BitContainerType bitContainer, U32 const nbBits) { -#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 && !defined(ZSTD_NO_INTRINSICS) - return _bzhi_u64(bitContainer, nbBits); +#if STATIC_BMI2 && !defined(ZSTD_NO_INTRINSICS) +# if (defined(__x86_64__) || defined(_M_X64)) && !defined(__ILP32__) + return _bzhi_u64(bitContainer, nbBits); +# else + DEBUG_STATIC_ASSERT(sizeof(bitContainer) == sizeof(U32)); + return _bzhi_u32(bitContainer, nbBits); +# endif #else assert(nbBits < BIT_MASK_SIZE); return bitContainer & BIT_mask[nbBits]; @@ -176,7 +178,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 co * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! */ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, - size_t value, unsigned nbBits) + BitContainerType value, unsigned nbBits) { DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32); assert(nbBits < BIT_MASK_SIZE); @@ -189,7 +191,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, * works only if `value` is _clean_, * meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, - size_t value, unsigned nbBits) + BitContainerType value, unsigned nbBits) { assert((value>>nbBits) == 0); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); @@ -236,7 +238,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) BIT_addBitsFast(bitC, 1, 1); /* endMark */ BIT_flushBits(bitC); if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ - return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); + return (size_t)(bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); } @@ -267,22 +269,22 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { - case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); + case 7: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); ZSTD_FALLTHROUGH; - case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); + case 6: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); ZSTD_FALLTHROUGH; - case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); + case 5: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); ZSTD_FALLTHROUGH; - case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; + case 4: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[3]) << 24; ZSTD_FALLTHROUGH; - case 3: bitD->bitContainer += (size_t)(((const 
BYTE*)(srcBuffer))[2]) << 16; + case 3: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[2]) << 16; ZSTD_FALLTHROUGH; - case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; + case 2: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[1]) << 8; ZSTD_FALLTHROUGH; default: break; @@ -297,12 +299,12 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si return srcSize; } -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start) +FORCE_INLINE_TEMPLATE BitContainerType BIT_getUpperBits(BitContainerType bitContainer, U32 const start) { return bitContainer >> start; } -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) +FORCE_INLINE_TEMPLATE BitContainerType BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits) { U32 const regMask = sizeof(bitContainer)*8 - 1; /* if start > regMask, bitstream is corrupted, and result is undefined */ @@ -312,7 +314,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c * such cpus old (pre-Haswell, 2013) and their performance is not of that * importance. */ -#if defined(__x86_64__) || defined(_M_X86) +#if defined(__x86_64__) || defined(_M_X64) return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1); #else return (bitContainer >> (start & regMask)) & BIT_mask[nbBits]; @@ -325,7 +327,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c * On 32-bits, maxNbBits==24. * On 64-bits, maxNbBits==56. * @return : value extracted */ -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) +FORCE_INLINE_TEMPLATE BitContainerType BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) { /* arbitrate between double-shift and shift+mask */ #if 1 @@ -341,14 +343,14 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U3 /*! BIT_lookBitsFast() : * unsafe version; only works if nbBits >= 1 */ -MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) +MEM_STATIC BitContainerType BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) { U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; assert(nbBits >= 1); return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); } -MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +FORCE_INLINE_TEMPLATE void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } @@ -357,23 +359,38 @@ MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) * Read (consume) next n bits from local register and update. * Pay attention to not read more than nbBits contained into local register. * @return : extracted value. */ -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) +FORCE_INLINE_TEMPLATE BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) { - size_t const value = BIT_lookBits(bitD, nbBits); + BitContainerType const value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } /*! 
BIT_readBitsFast() : * unsafe version; only works if nbBits >= 1 */ -MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) +MEM_STATIC BitContainerType BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) { - size_t const value = BIT_lookBitsFast(bitD, nbBits); + BitContainerType const value = BIT_lookBitsFast(bitD, nbBits); assert(nbBits >= 1); BIT_skipBits(bitD, nbBits); return value; } +/*! BIT_reloadDStream_internal() : + * Simple variant of BIT_reloadDStream(), with two conditions: + * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8 + * 2. look window is valid after shifted down : bitD->ptr >= bitD->start + */ +MEM_STATIC BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD) +{ + assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8); + bitD->ptr -= bitD->bitsConsumed >> 3; + assert(bitD->ptr >= bitD->start); + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_unfinished; +} + /*! BIT_reloadDStreamFast() : * Similar to BIT_reloadDStream(), but with two differences: + 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! @@ -384,31 +401,35 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) { if (UNLIKELY(bitD->ptr < bitD->limitPtr)) return BIT_DStream_overflow; - assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8); - bitD->ptr -= bitD->bitsConsumed >> 3; - bitD->bitsConsumed &= 7; - bitD->bitContainer = MEM_readLEST(bitD->ptr); - return BIT_DStream_unfinished; + return BIT_reloadDStream_internal(bitD); } /*! BIT_reloadDStream() : * Refill `bitD` from buffer previously set in BIT_initDStream() . - * This function is safe, it guarantees it will not read beyond src buffer. + * This function is safe, it guarantees it will never read beyond src buffer. * @return : status of `BIT_DStream_t` internal register.
/*! BIT_reloadDStream() : * Refill `bitD` from buffer previously set in BIT_initDStream() . - * This function is safe, it guarantees it will not read beyond src buffer. + * This function is safe, it guarantees it will never read beyond src buffer. * @return : status of `BIT_DStream_t` internal register. * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ -MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +FORCE_INLINE_TEMPLATE BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { - if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ + /* note : once in overflow mode, a bitstream remains in this mode until it's reset */ + if (UNLIKELY(bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))) { + static const BitContainerType zeroFilled = 0; + bitD->ptr = (const char*)&zeroFilled; /* aliasing is allowed for char */ + /* overflow detected, erroneous scenario or end of stream: no update */ return BIT_DStream_overflow; + } + + assert(bitD->ptr >= bitD->start); if (bitD->ptr >= bitD->limitPtr) { - return BIT_reloadDStreamFast(bitD); + return BIT_reloadDStream_internal(bitD); } if (bitD->ptr == bitD->start) { + /* reached end of bitStream => no update */ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } - /* start < ptr < limitPtr */ + /* start < ptr < limitPtr => cautious update */ { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { @@ -430,8 +451,4 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } -#if defined (__cplusplus) -} -#endif - #endif /* BITSTREAM_H_MODULE */ diff --git a/lib/common/compiler.h b/lib/common/compiler.h index 6c7100e835a..5e70570ec7a 100644 --- a/lib/common/compiler.h +++ b/lib/common/compiler.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,6 +11,8 @@ #ifndef ZSTD_COMPILER_H #define ZSTD_COMPILER_H +#include <stddef.h> + #include "portability_macros.h" /*-******************************************************* @@ -25,7 +27,7 @@ # define INLINE_KEYWORD #endif -#if defined(__GNUC__) || defined(__ICCARM__) +#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) # define FORCE_INLINE_ATTR __attribute__((always_inline)) #elif defined(_MSC_VER) # define FORCE_INLINE_ATTR __forceinline @@ -51,12 +53,19 @@ # define WIN_CDECL #endif +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ +#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) +# define UNUSED_ATTR __attribute__((unused)) +#else +# define UNUSED_ATTR +#endif + /** * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant * parameters. They must be inlined for the compiler to eliminate the constant * branches. */ -#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR /** * HINT_INLINE is used to help the compiler generate better code. It is *not* * used for "templates", so it can be tweaked based on the compilers @@ -71,21 +80,37 @@ #if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 # define HINT_INLINE static INLINE_KEYWORD #else -# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR +# define HINT_INLINE FORCE_INLINE_TEMPLATE #endif
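To illustrate the "C template" pattern this macro serves, a hypothetical example (not code from the patch; `mix32` and its wrappers are invented, U32 is the usual mem.h type): the constant parameter is folded away at each call site once the function is force-inlined, and the added UNUSED_ATTR keeps translation units that never instantiate the template warning-free.

/* Hypothetical header-defined "template": `fast` is always a literal at
 * call sites, so the branch disappears after forced inlining. */
FORCE_INLINE_TEMPLATE U32 mix32(U32 x, int fast)
{
    if (fast) return x * 2654435761U;            /* branch folds to this */
    return (x ^ (x >> 15)) * 2654435761U;
}
static U32 mix32_fast(U32 x) { return mix32(x, 1); }
static U32 mix32_safe(U32 x) { return mix32(x, 0); }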
-/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ +/* "soft" inline : + * The compiler is free to select if it's a good idea to inline or not. + * The main objective is to silence compiler warnings + * when a defined function is included but not used. + * + * Note : this macro is prefixed `MEM_` because it used to be provided by `mem.h` unit. + * Updating the prefix is probably preferable, but requires a fairly large codemod, + * since this name is used everywhere. + */ +#ifndef MEM_STATIC /* already defined in Linux Kernel mem.h */ #if defined(__GNUC__) -# define UNUSED_ATTR __attribute__((unused)) +# define MEM_STATIC static __inline UNUSED_ATTR +#elif defined(__IAR_SYSTEMS_ICC__) +# define MEM_STATIC static inline UNUSED_ATTR +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define MEM_STATIC static inline +#elif defined(_MSC_VER) +# define MEM_STATIC static __inline #else -# define UNUSED_ATTR +# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif #endif /* force no inlining */ #ifdef _MSC_VER # define FORCE_NOINLINE static __declspec(noinline) #else -# if defined(__GNUC__) || defined(__ICCARM__) +# if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) # define FORCE_NOINLINE static __attribute__((__noinline__)) # else # define FORCE_NOINLINE static @@ -94,7 +119,7 @@ /* target attribute */ -#if defined(__GNUC__) || defined(__ICCARM__) +#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) # define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) #else # define TARGET_ATTRIBUTE(target) @@ -109,10 +134,10 @@ /* prefetch * can be disabled, by declaring NO_PREFETCH build macro */ #if defined(NO_PREFETCH) -# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ -# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ +# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */ +# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */ #else -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ +# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) && !defined(_M_ARM64EC) /* _mm_prefetch() is not defined outside of x86/x64 */ # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ # define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) # define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1) @@ -120,24 +145,25 @@ # define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) # define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */) # elif defined(__aarch64__) -# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))) -# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))) +# define PREFETCH_L1(ptr) do { __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))); } while (0) +# define PREFETCH_L2(ptr) do { __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))); } while (0) # else -# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ -# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ +# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */ +# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */ # endif #endif /* NO_PREFETCH */ #define CACHELINE_SIZE 64 -#define PREFETCH_AREA(p, s) { \ - const char* const _ptr = (const char*)(p); \ - size_t const _size = (size_t)(s); \ - size_t _pos; \ - for (_pos=0;
_pos<_size; _pos+=CACHELINE_SIZE) { \ - PREFETCH_L2(_ptr + _pos); \ - } \ -} +#define PREFETCH_AREA(p, s) \ + do { \ + const char* const _ptr = (const char*)(p); \ + size_t const _size = (size_t)(s); \ + size_t _pos; \ + for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \ + PREFETCH_L2(_ptr + _pos); \ + } \ + } while (0) /* vectorization * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax, @@ -165,6 +191,12 @@ #define UNLIKELY(x) (x) #endif +#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) +# define ZSTD_UNREACHABLE do { assert(0), __builtin_unreachable(); } while (0) +#else +# define ZSTD_UNREACHABLE do { assert(0); } while (0) +#endif + /* disable warnings */ #ifdef _MSC_VER /* Visual Studio */ # include <intrin.h> /* For Visual 2005 */ @@ -175,35 +207,44 @@ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif -/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/ -#ifndef STATIC_BMI2 -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) -# ifdef __AVX2__ //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2 -# define STATIC_BMI2 1 -# endif -# elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__) -# define STATIC_BMI2 1 -# endif -#endif - -#ifndef STATIC_BMI2 - #define STATIC_BMI2 0 -#endif - /* compile time determination of SIMD support */ #if !defined(ZSTD_NO_INTRINSICS) -# if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) +# if defined(__AVX2__) +# define ZSTD_ARCH_X86_AVX2 +# endif +# if defined(__SSE2__) || defined(_M_X64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) # define ZSTD_ARCH_X86_SSE2 # endif # if defined(__ARM_NEON) || defined(_M_ARM64) # define ZSTD_ARCH_ARM_NEON # endif +# if defined(__ARM_FEATURE_SVE) +# define ZSTD_ARCH_ARM_SVE +# endif +# if defined(__ARM_FEATURE_SVE2) +# define ZSTD_ARCH_ARM_SVE2 +# endif +# if defined(__riscv) && defined(__riscv_vector) +# if ((defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 14) || \ + (defined(__clang__) && __clang_major__ >= 19)) + #define ZSTD_ARCH_RISCV_RVV +# endif +#endif # +# if defined(ZSTD_ARCH_X86_AVX2) +# include <immintrin.h> +# endif # if defined(ZSTD_ARCH_X86_SSE2) # include <emmintrin.h> # elif defined(ZSTD_ARCH_ARM_NEON) # include <arm_neon.h> # endif +# if defined(ZSTD_ARCH_ARM_SVE) || defined(ZSTD_ARCH_ARM_SVE2) +# include <arm_sve.h> +# endif +# if defined(ZSTD_ARCH_RISCV_RVV) +# include <riscv_vector.h> +# endif #endif /* C-language Attributes are added in C23. */ @@ -243,9 +284,15 @@ #endif /*-************************************************************** -* Alignment check +* Alignment *****************************************************************/ +/* @return 1 if @u is a 2^n value, 0 otherwise + * useful to check a value is valid for alignment restrictions */ +MEM_STATIC int ZSTD_isPower2(size_t u) { + return (u & (u-1)) == 0; +} + /* this test was initially positioned in mem.h, * but this file is removed (or replaced) for linux kernel * so it's now hosted in compiler.h, @@ -271,11 +318,105 @@ # endif #endif /* ZSTD_ALIGNOF */
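A small sketch (hypothetical helper, not part of the patch) of the intended use of the ZSTD_isPower2() helper added above when validating alignment requests:

/* Hypothetical alignment guard: valid alignments are powers of two, and a
 * power-of-two alignment can then be tested with a single mask. */
MEM_STATIC int isPtrAligned(const void* ptr, size_t align)
{
    assert(ZSTD_isPower2(align));
    return ((size_t)ptr & (align - 1)) == 0;
}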
+#ifndef ZSTD_ALIGNED +/* C90-compatible alignment macro (GCC/Clang). Adjust for other compilers if needed. */ +# if defined(__GNUC__) || defined(__clang__) +# define ZSTD_ALIGNED(a) __attribute__((aligned(a))) +# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ +# define ZSTD_ALIGNED(a) _Alignas(a) +#elif defined(_MSC_VER) +# define ZSTD_ALIGNED(n) __declspec(align(n)) +# else + /* this compiler will require its own alignment instruction */ +# define ZSTD_ALIGNED(...) +# endif +#endif /* ZSTD_ALIGNED */ + + /*-************************************************************** * Sanitizer *****************************************************************/ -#if ZSTD_MEMORY_SANITIZER +/** + * Zstd relies on pointer overflow in its decompressor. + * We add this attribute to functions that rely on pointer overflow. + */ +#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +# if __has_attribute(no_sanitize) +# if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8 + /* gcc < 8 only has signed-integer-overflow which triggers on pointer overflow */ +# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow"))) +# else + /* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */ +# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow"))) +# endif +# else +# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +# endif +#endif + +/** + * Helper function to perform a wrapped pointer difference without triggering + * UBSAN. + * + * @returns lhs - rhs with wrapping + */ +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs) +{ + return lhs - rhs; +} + +/** + * Helper function to perform a wrapped pointer add without triggering UBSAN. + * + * @return ptr + add with wrapping + */ +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +const void* ZSTD_wrappedPtrAdd(const void* ptr, ptrdiff_t add) +{ + return (const char*)ptr + add; +} + +/** + * Helper function to perform a wrapped pointer subtraction without triggering + * UBSAN. + * + * @return ptr - sub with wrapping + */ +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +const void* ZSTD_wrappedPtrSub(const void* ptr, ptrdiff_t sub) +{ + return (const char*)ptr - sub; +} + +/** + * Helper function to add to a pointer that works around C's undefined behavior + * of adding 0 to NULL. + * + * @returns `ptr + add` except it defines `NULL + 0 == NULL`. + */ +MEM_STATIC +void* ZSTD_maybeNullPtrAdd(void* ptr, ptrdiff_t add) +{ + return add > 0 ? (char*)ptr + add : ptr; +} + +/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an + * abundance of caution, disable our custom poisoning on mingw. */ +#ifdef __MINGW32__ +#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE +#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1 +#endif +#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE +#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1 +#endif +#endif + +#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) /* Not all platforms that support msan provide sanitizers/msan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to * include the header file... */ @@ -294,9 +435,13 @@ void __msan_poison(const volatile void *a, size_t size); /* Returns the offset of the first (at least partially) poisoned byte in the memory range, or -1 if the whole range is good. */ intptr_t __msan_test_shadow(const volatile void *x, size_t size); + +/* Print shadow and origin for the memory range to stderr in a human-readable + format.
*/ +void __msan_print_shadow(const volatile void *x, size_t size); #endif -#if ZSTD_ADDRESS_SANITIZER +#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE) /* Not all platforms that support asan provide sanitizers/asan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to * include the header file... */ diff --git a/lib/common/cpu.h b/lib/common/cpu.h index 8acd33be3cd..3f15d560f0c 100644 --- a/lib/common/cpu.h +++ b/lib/common/cpu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -35,6 +35,7 @@ MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { U32 f7b = 0; U32 f7c = 0; #if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) +#if !defined(_M_X64) || !defined(__clang__) || __clang_major__ >= 16 int reg[4]; __cpuid((int*)reg, 0); { @@ -50,6 +51,41 @@ MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { f7c = (U32)reg[2]; } } +#else + /* Clang compiler has a bug (fixed in https://reviews.llvm.org/D101338) in + * which the `__cpuid` intrinsic does not save and restore `rbx` as it needs + * to due to being a reserved register. So in that case, do the `cpuid` + * ourselves. Clang supports inline assembly anyway. + */ + U32 n; + __asm__( + "pushq %%rbx\n\t" + "cpuid\n\t" + "popq %%rbx\n\t" + : "=a"(n) + : "a"(0) + : "rcx", "rdx"); + if (n >= 1) { + U32 f1a; + __asm__( + "pushq %%rbx\n\t" + "cpuid\n\t" + "popq %%rbx\n\t" + : "=a"(f1a), "=c"(f1c), "=d"(f1d) + : "a"(1) + :); + } + if (n >= 7) { + __asm__( + "pushq %%rbx\n\t" + "cpuid\n\t" + "movq %%rbx, %%rax\n\t" + "popq %%rbx" + : "=a"(f7b), "=c"(f7c) + : "a"(7), "c"(0) + : "rdx"); + } +#endif #elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__) /* The following block like the normal cpuid branch below, but gcc * reserves ebx for use of its pic register so we must specially diff --git a/lib/common/debug.c b/lib/common/debug.c index bb863c9ea61..9d0b7d229c1 100644 --- a/lib/common/debug.c +++ b/lib/common/debug.c @@ -1,7 +1,7 @@ /* ****************************************************************** * debug * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -21,4 +21,10 @@ #include "debug.h" +#if !defined(ZSTD_LINUX_KERNEL) || (DEBUGLEVEL>=2) +/* We only use this when DEBUGLEVEL>=2, but we get -Werror=pedantic errors if a + * translation unit is empty. So remove this from Linux kernel builds, but + * otherwise just leave it in. + */ int g_debuglevel = DEBUGLEVEL; +#endif diff --git a/lib/common/debug.h b/lib/common/debug.h index 3b2a320a188..4b60ddf7f51 100644 --- a/lib/common/debug.h +++ b/lib/common/debug.h @@ -1,7 +1,7 @@ /* ****************************************************************** * debug * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -32,10 +32,6 @@ #ifndef DEBUG_H_12987983217 #define DEBUG_H_12987983217 -#if defined (__cplusplus) -extern "C" { -#endif - /* static assert is triggered at compile time, leaving no runtime artefact. * static assert only works with compile-time constants. 
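Stepping back to the wrapped-pointer helpers introduced in compiler.h above, a hedged sketch of their intent (the function and its names are hypothetical, not from the patch):

/* Hypothetical window-offset computation: `base` may have been rewound far
 * below `cur` by earlier arithmetic, so a raw subtraction could trip
 * -fsanitize=pointer-overflow; the wrapped helper states the intent. */
static size_t windowOffset(const BYTE* cur, const BYTE* base)
{
    return (size_t)ZSTD_wrappedPtrDiff(cur, base);
}

Similarly, ZSTD_maybeNullPtrAdd() keeps the common `dst == NULL, dstSize == 0` case well-defined when computing an end-of-buffer pointer.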
@@ -85,23 +81,27 @@ extern int g_debuglevel; /* the variable is only declared, It's useful when enabling very verbose levels on selective conditions (such as position in src) */ -# define RAWLOG(l, ...) { \ - if (l<=g_debuglevel) { \ - ZSTD_DEBUG_PRINT(__VA_ARGS__); \ - } } -# define DEBUGLOG(l, ...) { \ - if (l<=g_debuglevel) { \ - ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \ - ZSTD_DEBUG_PRINT(" \n"); \ - } } +# define RAWLOG(l, ...) \ + do { \ + if (l<=g_debuglevel) { \ + ZSTD_DEBUG_PRINT(__VA_ARGS__); \ + } \ + } while (0) + +#define STRINGIFY(x) #x +#define TOSTRING(x) STRINGIFY(x) +#define LINE_AS_STRING TOSTRING(__LINE__) + +# define DEBUGLOG(l, ...) \ + do { \ + if (l<=g_debuglevel) { \ + ZSTD_DEBUG_PRINT(__FILE__ ":" LINE_AS_STRING ": " __VA_ARGS__); \ + ZSTD_DEBUG_PRINT(" \n"); \ + } \ + } while (0) #else -# define RAWLOG(l, ...) {} /* disabled */ -# define DEBUGLOG(l, ...) {} /* disabled */ -#endif - - -#if defined (__cplusplus) -} +# define RAWLOG(l, ...) do { } while (0) /* disabled */ +# define DEBUGLOG(l, ...) do { } while (0) /* disabled */ #endif #endif /* DEBUG_H_12987983217 */ diff --git a/lib/common/entropy_common.c b/lib/common/entropy_common.c index 98bd4238d62..e2173afb0a8 100644 --- a/lib/common/entropy_common.c +++ b/lib/common/entropy_common.c @@ -1,6 +1,6 @@ /* ****************************************************************** * Common functions of New Generation Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -19,7 +19,6 @@ #include "error_private.h" /* ERR_*, ERROR */ #define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ #include "fse.h" -#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ #include "huf.h" #include "bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros32 */ @@ -237,7 +236,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, const void* src, size_t srcSize) { U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; - return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0); + return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0); } FORCE_INLINE_TEMPLATE size_t @@ -329,13 +328,13 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, - int bmi2) + int flags) { #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } #endif - (void)bmi2; + (void)flags; return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } diff --git a/lib/common/error_private.c b/lib/common/error_private.c index 1b67500f3bb..7e7f24f6756 100644 --- a/lib/common/error_private.c +++ b/lib/common/error_private.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -29,7 +29,9 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; case PREFIX(corruption_detected): return "Data corruption detected"; case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; + case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification"; case PREFIX(parameter_unsupported): return "Unsupported parameter"; + case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters"; case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; @@ -38,6 +40,7 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; + case PREFIX(cannotProduce_uncompressedBlock): return "This mode cannot generate an uncompressed block"; case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; @@ -45,11 +48,15 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; case PREFIX(srcSize_wrong): return "Src size is incorrect"; case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; + case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full"; + case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty"; /* following error codes are not stable and may be removed or changed in a future version */ case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; case PREFIX(srcBuffer_wrong): return "Source buffer is wrong"; + case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code"; + case PREFIX(externalSequences_invalid): return "External sequences are not valid"; case PREFIX(maxCode): default: return notErrorCode; } diff --git a/lib/common/error_private.h b/lib/common/error_private.h index 007d81066ab..9dcc8595123 100644 --- a/lib/common/error_private.h +++ b/lib/common/error_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -13,11 +13,6 @@ #ifndef ERROR_H_MODULE #define ERROR_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - - /* **************************************** * Dependencies ******************************************/ @@ -26,7 +21,6 @@ extern "C" { #include "debug.h" #include "zstd_deps.h" /* size_t */ - /* **************************************** * Compiler-specific ******************************************/ @@ -60,8 +54,13 @@ ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } /* check and forward error code */ -#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e -#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } +#define CHECK_V_F(e, f) \ + size_t const e = f; \ + do { \ + if (ERR_isError(e)) \ + return e; \ + } while (0) +#define CHECK_F(f) do { CHECK_V_F(_var_err__, f); } while (0) /*-**************************************** @@ -95,10 +94,12 @@ void _force_has_format_string(const char *format, ...) { * We want to force this function invocation to be syntactically correct, but * we don't want to force runtime evaluation of its arguments. */ -#define _FORCE_HAS_FORMAT_STRING(...) \ - if (0) { \ - _force_has_format_string(__VA_ARGS__); \ - } +#define _FORCE_HAS_FORMAT_STRING(...) \ + do { \ + if (0) { \ + _force_has_format_string(__VA_ARGS__); \ + } \ + } while (0) #define ERR_QUOTE(str) #str @@ -109,51 +110,49 @@ void _force_has_format_string(const char *format, ...) { * In order to do that (particularly, printing the conditional that failed), * this can't just wrap RETURN_ERROR(). */ -#define RETURN_ERROR_IF(cond, err, ...) \ - if (cond) { \ - RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ - __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } +#define RETURN_ERROR_IF(cond, err, ...) \ + do { \ + if (cond) { \ + RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ + __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } \ + } while (0) /** * Unconditionally return the specified error. * * In debug modes, prints additional information. */ -#define RETURN_ERROR(err, ...) \ - do { \ - RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ - __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } while(0); +#define RETURN_ERROR(err, ...) \ + do { \ + RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ + __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } while(0) /** * If the provided expression evaluates to an error code, returns that error code. * * In debug modes, prints additional information. */ -#define FORWARD_IF_ERROR(err, ...) 
\ - do { \ - size_t const err_code = (err); \ - if (ERR_isError(err_code)) { \ - RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ - __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return err_code; \ - } \ - } while(0); - -#if defined (__cplusplus) -} -#endif +#define FORWARD_IF_ERROR(err, ...) \ + do { \ + size_t const err_code = (err); \ + if (ERR_isError(err_code)) { \ + RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ + __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return err_code; \ + } \ + } while(0) #endif /* ERROR_H_MODULE */ diff --git a/lib/common/fse.h b/lib/common/fse.h index 466a072818a..b6c2a3e9ccc 100644 --- a/lib/common/fse.h +++ b/lib/common/fse.h @@ -1,7 +1,7 @@ /* ****************************************************************** * FSE : Finite State Entropy codec * Public Prototypes declaration - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -11,11 +11,6 @@ * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ - -#if defined (__cplusplus) -extern "C" { -#endif - #ifndef FSE_H #define FSE_H @@ -25,7 +20,6 @@ extern "C" { ******************************************/ #include "zstd_deps.h" /* size_t, ptrdiff_t */ - /*-***************************************** * FSE_PUBLIC_API : control library symbols visibility ******************************************/ @@ -53,34 +47,6 @@ extern "C" { FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ -/*-**************************************** -* FSE simple functions -******************************************/ -/*! FSE_compress() : - Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. - 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize). - @return : size of compressed data (<= dstCapacity). - Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. - if FSE_isError(return), compression failed (more details using FSE_getErrorName()) -*/ -FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/*! FSE_decompress(): - Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', - into already allocated destination buffer 'dst', of size 'dstCapacity'. - @return : size of regenerated data (<= maxDstSize), - or an error code, which can be tested using FSE_isError() . - - ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! - Why ? : making this distinction requires a header. - Header management is intentionally delegated to the user layer, which can better manage special cases. 
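The reworked error_private.h macros above now each expand to a single statement; a hypothetical helper (not from the patch) showing the intended call pattern, safe even in unbraced control flow:

/* Hypothetical: RETURN_ERROR_IF guards a precondition and logs in debug
 * builds; dstSize_tooSmall is one of the real error codes listed above. */
static size_t copyChecked(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall,
                    "need %u bytes", (unsigned)srcSize);
    ZSTD_memcpy(dst, src, srcSize);
    return srcSize;
}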
-*/ -FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, - const void* cSrc, size_t cSrcSize); - - /*-***************************************** * Tool functions ******************************************/ @@ -91,20 +57,6 @@ FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ -/*-***************************************** -* FSE advanced functions -******************************************/ -/*! FSE_compress2() : - Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' - Both parameters can be defined as '0' to mean : use default value - @return : size of compressed data - Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! - if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. - if FSE_isError(return), it's an error code. -*/ -FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); - - /*-***************************************** * FSE detailed API ******************************************/ @@ -164,8 +116,6 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, /*! Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ -FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); -FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); /*! FSE_buildCTable(): Builds `ct`, which must be already allocated, using FSE_createCTable(). @@ -241,23 +191,7 @@ FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize, int bmi2); -/*! Constructor and Destructor of FSE_DTable. - Note that its size depends on 'tableLog' */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ -FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); -FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); - -/*! FSE_buildDTable(): - Builds 'dt', which must be already allocated, using FSE_createDTable(). - return : 0, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); - -/*! FSE_decompress_usingDTable(): - Decompress compressed source `cSrc` of size `cSrcSize` using `dt` - into `dst` which must be already allocated. - @return : size of regenerated data (necessarily <= `dstCapacity`), - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); /*! 
Tutorial : @@ -289,13 +223,11 @@ If there is an error, the function will return an error code, which can be teste #endif /* FSE_H */ + #if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) #define FSE_H_FSE_STATIC_LINKING_ONLY - -/* *** Dependency *** */ #include "bitstream.h" - /* ***************************************** * Static allocation *******************************************/ @@ -320,16 +252,6 @@ If there is an error, the function will return an error code, which can be teste unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); /**< same as FSE_optimalTableLog(), which used `minus==2` */ -/* FSE_compress_wksp() : - * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). - * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable. - */ -#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) ) -size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); - -size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ - size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); /**< build a fake FSE_CTable, designed to compress always the same symbolValue */ @@ -347,19 +269,11 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */ -size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits); -/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */ - -size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue); -/**< build a fake FSE_DTable, designed to always generate the same symbolValue */ - #define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) #define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned)) -size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize); -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */ - size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2); -/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */ +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`. 
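For reference, the removed FSE_decompress() convenience wrapper reduces to the following call into the surviving workspace API (a sketch mirroring the deleted code; the wrapper name is hypothetical):

/* One-shot decode with a stack workspace, as the removed wrapper did. */
static size_t fseDecompressOneShot(void* dst, size_t dstCapacity,
                                   const void* cSrc, size_t cSrcSize)
{
    U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
    return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize,
                                    FSE_MAX_TABLELOG, wksp, sizeof(wksp), /* bmi2 */ 0);
}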
+ * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */ typedef enum { FSE_repeat_none, /**< Cannot use the previous table */ @@ -542,13 +456,13 @@ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, un FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; const U16* const stateTable = (const U16*)(statePtr->stateTable); U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); - BIT_addBits(bitC, statePtr->value, nbBitsOut); + BIT_addBits(bitC, (BitContainerType)statePtr->value, nbBitsOut); statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; } MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) { - BIT_addBits(bitC, statePtr->value, statePtr->stateLog); + BIT_addBits(bitC, (BitContainerType)statePtr->value, statePtr->stateLog); BIT_flushBits(bitC); } @@ -708,10 +622,4 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) #define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3) - #endif /* FSE_STATIC_LINKING_ONLY */ - - -#if defined (__cplusplus) -} -#endif diff --git a/lib/common/fse_decompress.c b/lib/common/fse_decompress.c index 7034fd97b47..c8f1bb0cf23 100644 --- a/lib/common/fse_decompress.c +++ b/lib/common/fse_decompress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * FSE : Finite State Entropy decoder - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -22,8 +22,7 @@ #define FSE_STATIC_LINKING_ONLY #include "fse.h" #include "error_private.h" -#define ZSTD_DEPS_NEED_MALLOC -#include "zstd_deps.h" +#include "zstd_deps.h" /* ZSTD_memcpy */ #include "bits.h" /* ZSTD_highbit32 */ @@ -56,19 +55,6 @@ #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) - -/* Function templates */ -FSE_DTable* FSE_createDTable (unsigned tableLog) -{ - if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; - return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); -} - -void FSE_freeDTable (FSE_DTable* dt) -{ - ZSTD_free(dt); -} - static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize) { void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ @@ -97,7 +83,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0; - symbolNext[s] = normalizedCounter[s]; + symbolNext[s] = (U16)normalizedCounter[s]; } } } ZSTD_memcpy(dt, &DTableH, sizeof(DTableH)); } @@ -112,8 +98,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo * all symbols have counts <= 8. We ensure we have 8 bytes at the end of * our buffer to handle the over-write. */ - { - U64 const add = 0x0101010101010101ull; + { U64 const add = 0x0101010101010101ull; size_t pos = 0; U64 sv = 0; U32 s; @@ -124,9 +109,8 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo for (i = 8; i < n; i += 8) { MEM_write64(spread + pos + i, sv); } - pos += n; - } - } + pos += (size_t)n; + } } /* Now we spread those positions across the table. 
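The 8-byte spreading step above can be read in isolation as the following sketch (hypothetical standalone function; MEM_write64 is the unaligned-store helper from mem.h):

/* Byte `s` is replicated into all 8 lanes of sv (no carries: every byte of
 * `add` is 1 and s <= 255), then stored 8 bytes at a time. Overshooting
 * `count` is fine because the caller reserves 8 bytes of slack. */
static void spreadSymbol(BYTE* spread, size_t* pos, BYTE s, int count)
{
    U64 const add = 0x0101010101010101ull;
    U64 const sv = s * add;    /* s, s, s, s, s, s, s, s */
    int i;
    for (i = 0; i < count; i += 8)
        MEM_write64(spread + *pos + i, sv);
    *pos += (size_t)count;
}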
* The benefit of doing it in two stages is that we avoid the * variable size inner loop, which caused lots of branch misses. @@ -185,49 +169,6 @@ size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsi /*-******************************************************* * Decompression (Byte symbols) *********************************************************/ -size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - void* dPtr = dt + 1; - FSE_decode_t* const cell = (FSE_decode_t*)dPtr; - - DTableH->tableLog = 0; - DTableH->fastMode = 0; - - cell->newState = 0; - cell->symbol = symbolValue; - cell->nbBits = 0; - - return 0; -} - - -size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - void* dPtr = dt + 1; - FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSV1 = tableMask+1; - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* Build Decoding Table */ - DTableH->tableLog = (U16)nbBits; - DTableH->fastMode = 1; - for (s=0; s<maxSV1; s++) { - dinfo[s].newState = 0; - dinfo[s].symbol = (BYTE)s; - dinfo[s].nbBits = (BYTE)nbBits; - } - - return 0; -} - -size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize, - const FSE_DTable* dt) -{ - const void* ptr = dt; - const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; - const U32 fastMode = DTableH->fastMode; - - /* select fast mode (static) */ - if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); - return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); -} - - -size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) -{ - return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0); + assert(op >= ostart); + return (size_t)(op-ostart); } typedef struct { short ncount[FSE_MAX_SYMBOL_VALUE + 1]; - FSE_DTable dtable[1]; /* Dynamically sized */ } FSE_DecompressWksp; @@ -328,13 +251,18 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace; + size_t const dtablePos = sizeof(FSE_DecompressWksp) / sizeof(FSE_DTable); + FSE_DTable* const dtable = (FSE_DTable*)workSpace + dtablePos; - DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0); + FSE_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0); if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC); + /* correct offset to dtable depends on this property */ + FSE_STATIC_ASSERT(sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) == 0); + /* normal FSE decoding mode */ - { - size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2); + { size_t const NCountLength = + FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2); if (FSE_isError(NCountLength)) return NCountLength; if (tableLog > maxLog) return ERROR(tableLog_tooLarge); assert(NCountLength <= cSrcSize); @@ -347,16 +275,16 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog); wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog); - CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) ); + CHECK_F( FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) ); { - const void* ptr = wksp->dtable; + const void* ptr = dtable; const FSE_DTableHeader*
DTableH = (const FSE_DTableHeader*)ptr; const U32 fastMode = DTableH->fastMode; /* select fast mode (static) */ - if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1); - return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0); + if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1); + return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0); } } @@ -384,22 +312,4 @@ size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); } - -typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { - U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_TABLELOG_ABSOLUTE_MAX, FSE_MAX_SYMBOL_VALUE)]; - return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, wksp, sizeof(wksp)); -} - -size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) -{ - /* Static analyzer seems unable to understand this table will be properly initialized later */ - U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; - return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, FSE_MAX_TABLELOG, wksp, sizeof(wksp)); -} -#endif - - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/common/huf.h b/lib/common/huf.h index 85518481ec6..4b142c4f996 100644 --- a/lib/common/huf.h +++ b/lib/common/huf.h @@ -1,7 +1,7 @@ /* ****************************************************************** * huff0 huffman codec, * part of Finite State Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -12,108 +12,26 @@ * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ -#if defined (__cplusplus) -extern "C" { -#endif - #ifndef HUF_H_298734234 #define HUF_H_298734234 /* *** Dependencies *** */ #include "zstd_deps.h" /* size_t */ - - -/* *** library symbols visibility *** */ -/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, - * HUF symbols remain "private" (internal symbols for library only). - * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ -#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) -# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) -#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ -# define HUF_PUBLIC_API __declspec(dllexport) -#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) -# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ -#else -# define HUF_PUBLIC_API -#endif - - -/* ========================== */ -/* *** simple functions *** */ -/* ========================== */ - -/** HUF_compress() : - * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. - * 'dst' buffer must be already allocated. - * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). 
- * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. - * @return : size of compressed data (<= `dstCapacity`). - * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) - */ -HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/** HUF_decompress() : - * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', - * into already allocated buffer 'dst', of minimum size 'dstSize'. - * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. - * Note : in contrast with FSE, HUF_decompress can regenerate - * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, - * because it knows size to regenerate (originalSize). - * @return : size of regenerated data (== originalSize), - * or an error code, which can be tested using HUF_isError() - */ -HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize); - +#include "mem.h" /* U32 */ +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" /* *** Tool functions *** */ -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ -HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ /* Error Management */ -HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ -HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ - - -/* *** Advanced function *** */ +unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ +const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ -/** HUF_compress2() : - * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. - * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . - * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ -HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog); -/** HUF_compress4X_wksp() : - * Same as HUF_compress2(), but uses externally allocated `workSpace`. - * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */ #define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) #define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) -HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize); - -#endif /* HUF_H_298734234 */ - -/* ****************************************************************** - * WARNING !! - * The following section contains advanced and experimental definitions - * which shall never be used in the context of a dynamic library, - * because they are not guaranteed to remain stable in the future. - * Only consider them in association with static linking. 
- * *****************************************************************/ -#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) -#define HUF_H_HUF_STATIC_LINKING_ONLY - -/* *** Dependencies *** */ -#include "mem.h" /* U32 */ -#define FSE_STATIC_LINKING_ONLY -#include "fse.h" - /* *** Constants *** */ #define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ @@ -154,25 +72,49 @@ typedef U32 HUF_DTable; /* **************************************** * Advanced decompression functions ******************************************/ -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -#endif -size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ -size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif +/** + * Huffman flags bitset. + * For all flags, 0 is the default value. + */ +typedef enum { + /** + * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime. + * Otherwise: Ignored. + */ + HUF_flags_bmi2 = (1 << 0), + /** + * If set: Test possible table depths to find the one that produces the smallest header + encoded size. + * If unset: Use heuristic to find the table depth. + */ + HUF_flags_optimalDepth = (1 << 1), + /** + * If set: If the previous table can encode the input, always reuse the previous table. + * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output. + */ + HUF_flags_preferRepeat = (1 << 2), + /** + * If set: Sample the input and check if the sample is uncompressible; if it is, don't attempt to compress. + * If unset: Always histogram the entire input. + */ + HUF_flags_suspectUncompressible = (1 << 3), + /** + * If set: Don't use assembly implementations. + * If unset: Allow using assembly implementations. + */ + HUF_flags_disableAsm = (1 << 4), + /** + * If set: Don't use the fast decoding loop; always use the fallback decoding loop. + * If unset: Use the fast decoding loop when possible. + */ + HUF_flags_disableFast = (1 << 5) +} HUF_flags_e; /* **************************************** * HUF detailed API * ****************************************/ +#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra /*!
HUF_compress() does the following: * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") @@ -185,12 +127,12 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, * For example, it's possible to compress several blocks using the same 'CTable', * or to save and regenerate 'CTable' using external methods. */ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); -size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ -size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); +unsigned HUF_minTableLog(unsigned symbolCardinality); +unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue); +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace, + size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize); -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); -size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); @@ -199,6 +141,7 @@ typedef enum { HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ } HUF_repeat; + /** HUF_compress4X_repeat() : * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. @@ -209,13 +152,13 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); + HUF_CElt* hufTable, HUF_repeat* repeat, int flags); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. 
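A hedged sketch of a call site after this flags consolidation: the separate preferRepeat/bmi2/suspectUncompressible arguments collapse into one bitset. The wrapper below is hypothetical, and ZSTD_cpuSupportsBmi2() is assumed to be the runtime-detection helper from cpu.h; the destination is sized with HUF_compressBound().

/* Hypothetical wrapper around the consolidated-flags entry point. */
static size_t compressBlock(void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            HUF_CElt* hufTable, HUF_repeat* repeat,
                            void* wksp, size_t wkspSize)
{
    int flags = HUF_flags_suspectUncompressible;
    if (ZSTD_cpuSupportsBmi2()) flags |= HUF_flags_bmi2;       /* assumed helper */
    if (dstCapacity < HUF_compressBound(srcSize)) return 0;    /* skip: not enough room */
    return HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
                                 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                                 wksp, wkspSize, hufTable, repeat, flags);
}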
*/ -#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) +#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192) #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, @@ -241,7 +184,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workspace, size_t wkspSize, - int bmi2); + int flags); /** HUF_readCTable() : * Loading a CTable saved with HUF_writeCTable() */ @@ -249,9 +192,22 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void /** HUF_getNbBitsFromCTable() : * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX - * Note 1 : is not inlined, as HUF_CElt definition is private */ + * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0 + * Note 2 : is not inlined, as HUF_CElt definition is private + */ U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue); +typedef struct { + BYTE tableLog; + BYTE maxSymbolValue; + BYTE unused[sizeof(size_t) - 2]; +} HUF_CTableHeader; + +/** HUF_readCTableHeader() : + * @returns The header from the CTable specifying the tableLog and the maxSymbolValue. + */ +HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable); + /* * HUF_decompress() does the following: * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics @@ -279,32 +235,12 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); #define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif - /* ====================== */ /* single stream variants */ /* ====================== */ -size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */ -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); -size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); +size_t HUF_compress1X_usingCTable(void* dst, size_t 
dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); /** HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. @@ -315,50 +251,27 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); - -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ -#endif - -size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); -size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif + HUF_CElt* hufTable, HUF_repeat* repeat, int flags); -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /**< double-symbols decoder */ #endif /* BMI2 variants. * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. 
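HUF_readCTableHeader(), declared in the hunk above, exposes the tableLog and maxSymbolValue stored at the front of a CTable without revealing the private HUF_CElt layout. A small sketch of the kind of query this enables, assuming `ctable` was built elsewhere (e.g. by HUF_buildCTable_wksp()); the function name is illustrative:

/* Sketch: the longest code length present in a CTable, bounded by its tableLog. */
static unsigned HUF_maxCodeLength(const HUF_CElt* ctable)
{
    HUF_CTableHeader const header = HUF_readCTableHeader(ctable);
    unsigned maxBits = 0;
    unsigned s;
    for (s = 0; s <= header.maxSymbolValue; ++s) {
        U32 const nbBits = HUF_getNbBitsFromCTable(ctable, s);  /* per-symbol code length */
        if (nbBits > maxBits) maxBits = nbBits;
    }
    return maxBits;  /* never exceeds header.tableLog */
}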
*/ -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); #endif -#endif /* HUF_STATIC_LINKING_ONLY */ - -#if defined (__cplusplus) -} -#endif +#endif /* HUF_H_298734234 */ diff --git a/lib/common/mem.h b/lib/common/mem.h index 4b10f7c1f06..e66a2eaeb27 100644 --- a/lib/common/mem.h +++ b/lib/common/mem.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
 * * This source code is licensed under both the @@ -11,10 +11,6 @@ #ifndef MEM_H_MODULE #define MEM_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - /*-**************************************** * Dependencies ******************************************/ @@ -30,15 +26,8 @@ extern "C" { #if defined(_MSC_VER) /* Visual Studio */ # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ -#endif -#if defined(__GNUC__) -# define MEM_STATIC static __inline __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#elif defined(__ICCARM__) +# include <intrinsics.h> #endif /*-************************************************************** @@ -83,7 +72,6 @@ typedef signed long long S64; #endif - /*-************************************************************** * Memory I/O API *****************************************************************/ @@ -133,21 +121,15 @@ MEM_STATIC size_t MEM_swapST(size_t in); /*-************************************************************** * Memory I/O Implementation *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. +/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory: + * Method 0 : always use `memcpy()`. Safe and portable. + * Method 1 : Use compiler extension to set unaligned access. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
- * Prefer these methods in priority order (0 > 1 > 2) + * Default : method 1 if supported, else method 0 */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) +# ifdef __GNUC__ # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif @@ -165,10 +147,12 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return 1; #elif defined(__clang__) && __BIG_ENDIAN__ return 0; -#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86) +#elif defined(_MSC_VER) && (_M_X64 || _M_IX86) return 1; #elif defined(__DMC__) && defined(_M_IX86) return 1; +#elif defined(__IAR_SYSTEMS_ICC__) && __LITTLE_ENDIAN__ + return 1; #else const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; @@ -190,30 +174,19 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) - __pragma( pack(push, 1) ) - typedef struct { U16 v; } unalign16; - typedef struct { U32 v; } unalign32; - typedef struct { U64 v; } unalign64; - typedef struct { size_t v; } unalignArch; - __pragma( pack(pop) ) -#else - typedef struct { U16 v; } __attribute__((packed)) unalign16; - typedef struct { U32 v; } __attribute__((packed)) unalign32; - typedef struct { U64 v; } __attribute__((packed)) unalign64; - typedef struct { size_t v; } __attribute__((packed)) unalignArch; -#endif +typedef __attribute__((aligned(1))) U16 unalign16; +typedef __attribute__((aligned(1))) U32 unalign32; +typedef __attribute__((aligned(1))) U64 unalign64; +typedef __attribute__((aligned(1))) size_t unalignArch; -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } -MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } +MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; } +MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; } -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; } #else @@ -272,6 +245,8 @@ MEM_STATIC U32 MEM_swap32(U32 in) #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ || (defined(__clang__) && __has_builtin(__builtin_bswap32)) return __builtin_bswap32(in); +#elif defined(__ICCARM__) + return __REV(in); #else return MEM_swap32_fallback(in); #endif @@ -444,9 +419,4 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) /* code only tested on 32 and 64 bits systems 
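In concrete terms, the two access methods kept by the rewritten MEM_FORCE_MEMORY_ACCESS comment look roughly like this. A standalone sketch: the `_`-suffixed names are illustrative (chosen to avoid colliding with mem.h), and the aligned(1) typedef is the GNU C extension this diff adopts for Method 1:

#include <string.h>  /* memcpy */

typedef unsigned int U32_;
typedef __attribute__((aligned(1))) U32_ unalign32_;  /* Method 1: alignment-1 type */

/* Method 0: memcpy() is always safe and portable; optimizing compilers
 * typically lower it to a single (possibly unaligned) load. */
static U32_ read32_method0(const void* ptr) { U32_ val; memcpy(&val, ptr, sizeof val); return val; }

/* Method 1: dereferencing through an aligned(1) type tells the compiler the
 * address may be misaligned, so it must emit an unaligned-capable load. */
static U32_ read32_method1(const void* ptr) { return *(const unalign32_*)ptr; }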
*/ MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } - -#if defined (__cplusplus) -} -#endif - #endif /* MEM_H_MODULE */ diff --git a/lib/common/pool.c b/lib/common/pool.c index 5c1d07d356e..dd5fb0c4d4b 100644 --- a/lib/common/pool.c +++ b/lib/common/pool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -10,9 +10,9 @@ /* ====== Dependencies ======= */ +#include "../common/allocations.h" /* ZSTD_customCalloc, ZSTD_customFree */ #include "zstd_deps.h" /* size_t */ #include "debug.h" /* assert */ -#include "zstd_internal.h" /* ZSTD_customMalloc, ZSTD_customFree */ #include "pool.h" /* ====== Compiler specifics ====== */ @@ -126,7 +126,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, * empty and full queues. */ ctx->queueSize = queueSize + 1; - ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem); + ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem); ctx->queueHead = 0; ctx->queueTail = 0; ctx->numThreadsBusy = 0; @@ -140,21 +140,19 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, } ctx->shutdown = 0; /* Allocate space for the thread handles */ - ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); + ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); ctx->threadCapacity = 0; + ctx->threadLimit = numThreads; ctx->customMem = customMem; /* Check for errors */ if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } /* Initialize the threads */ - { size_t i; - for (i = 0; i < numThreads; ++i) { - if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { - ctx->threadCapacity = i; - POOL_free(ctx); - return NULL; - } } - ctx->threadCapacity = numThreads; - ctx->threadLimit = numThreads; + while (ctx->threadCapacity < numThreads) { + if (ZSTD_pthread_create(&ctx->threads[ctx->threadCapacity++], NULL, &POOL_thread, ctx)) { + --ctx->threadCapacity; + POOL_free(ctx); + return NULL; + } } return ctx; } @@ -173,7 +171,7 @@ static void POOL_join(POOL_ctx* ctx) { /* Join all of the threads */ { size_t i; for (i = 0; i < ctx->threadCapacity; ++i) { - ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */ + ZSTD_pthread_join(ctx->threads[i]); /* note : could fail */ } } } @@ -220,23 +218,22 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads) return 0; } /* numThreads > threadCapacity */ - { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); + ctx->threadLimit = numThreads; + { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); if (!threadPool) return 1; - /* replace existing thread pool */ - ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool)); + /* extend existing thread pool */ + ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(ZSTD_pthread_t)); ZSTD_customFree(ctx->threads, ctx->customMem); ctx->threads = threadPool; /* Initialize additional threads */ - { size_t threadId; - for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) { - if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) { - 
ctx->threadCapacity = threadId; - return 1; - } } - } } + while (ctx->threadCapacity < numThreads) { + if (ZSTD_pthread_create(&threadPool[ctx->threadCapacity++], NULL, &POOL_thread, ctx)) { + --ctx->threadCapacity; + return 1; + } + } + } /* successfully expanded */ - ctx->threadCapacity = numThreads; - ctx->threadLimit = numThreads; return 0; } @@ -271,7 +268,9 @@ static int isQueueFull(POOL_ctx const* ctx) { static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) { - POOL_job const job = {function, opaque}; + POOL_job job; + job.function = function; + job.opaque = opaque; assert(ctx != NULL); if (ctx->shutdown) return; diff --git a/lib/common/pool.h b/lib/common/pool.h index b86a3452e5c..f39b7f1eb99 100644 --- a/lib/common/pool.h +++ b/lib/common/pool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,10 +11,6 @@ #ifndef POOL_H #define POOL_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "zstd_deps.h" #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ @@ -47,7 +43,7 @@ void POOL_joinJobs(POOL_ctx* ctx); /*! POOL_resize() : * Expands or shrinks pool's number of threads. * This is more efficient than releasing + creating a new context, - * since it tries to preserve and re-use existing threads. + * since it tries to preserve and reuse existing threads. * `numThreads` must be at least 1. * @return : 0 when resize was successful, * !0 (typically 1) if there is an error. @@ -82,9 +78,4 @@ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); */ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); - -#if defined (__cplusplus) -} -#endif - #endif diff --git a/lib/common/portability_macros.h b/lib/common/portability_macros.h index 1650fa3d8cf..bcca634e419 100644 --- a/lib/common/portability_macros.h +++ b/lib/common/portability_macros.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -68,30 +68,45 @@ /* Mark the internal assembly functions as hidden */ #ifdef __ELF__ # define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func +#elif defined(__APPLE__) +# define ZSTD_HIDE_ASM_FUNCTION(func) .private_extern func #else # define ZSTD_HIDE_ASM_FUNCTION(func) #endif +/* Compile time determination of BMI2 support */ +#ifndef STATIC_BMI2 +# if defined(__BMI2__) +# define STATIC_BMI2 1 +# elif defined(_MSC_VER) && defined(__AVX2__) +# define STATIC_BMI2 1 /* MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2 */ +# endif +#endif + +#ifndef STATIC_BMI2 +# define STATIC_BMI2 0 +#endif + /* Enable runtime BMI2 dispatch based on the CPU. * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. 
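STATIC_BMI2, added in the hunk above, answers "is BMI2 guaranteed at compile time?", while DYNAMIC_BMI2 (below) answers "can this compiler build a BMI2 specialization and select it at runtime?". A sketch of the dispatch shape the two macros drive; the kernel names are illustrative, but zstd's hot functions follow the same pattern, with the runtime bit ultimately coming from the CPUID helpers in lib/common/cpu.h:

#if STATIC_BMI2 || DYNAMIC_BMI2
# if !STATIC_BMI2
__attribute__((target("bmi2")))  /* per-function BMI2 codegen (gcc >= 4.8 / clang) */
# endif
static int kernel_bmi2(void) { return 1; }
#endif

static int kernel_default(void) { return 0; }

static int kernel_dispatch(int cpuHasBmi2)
{
#if STATIC_BMI2
    (void)cpuHasBmi2;
    return kernel_bmi2();                  /* BMI2 guaranteed by the build flags */
#elif DYNAMIC_BMI2
    if (cpuHasBmi2) return kernel_bmi2();  /* runtime CPU detection decides */
    return kernel_default();
#else
    (void)cpuHasBmi2;
    return kernel_default();               /* no BMI2 path compiled in */
#endif
}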
 */ #ifndef DYNAMIC_BMI2 - #if ((defined(__clang__) && __has_attribute(__target__)) \ +# if ((defined(__clang__) && __has_attribute(__target__)) \ || (defined(__GNUC__) \ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ - && (defined(__x86_64__) || defined(_M_X64)) \ + && (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)) \ && !defined(__BMI2__) - # define DYNAMIC_BMI2 1 - #else - # define DYNAMIC_BMI2 0 - #endif +# define DYNAMIC_BMI2 1 +# else +# define DYNAMIC_BMI2 0 +# endif #endif /** - * Only enable assembly for GNUC compatible compilers, + * Only enable assembly for GNU C compatible compilers, * because other platforms may not support GAS assembly syntax. * - * Only enable assembly for Linux / MacOS, other platforms may + * Only enable assembly for Linux / MacOS / Win32, other platforms may * work, but they haven't been tested. This could likely be * extended to BSD systems. * @@ -99,7 +114,7 @@ * 100% of code to be instrumented to work. */ #if defined(__GNUC__) -# if defined(__linux__) || defined(__linux) || defined(__APPLE__) +# if defined(__linux__) || defined(__linux) || defined(__APPLE__) || defined(_WIN32) # if ZSTD_MEMORY_SANITIZER # define ZSTD_ASM_SUPPORTED 0 # elif ZSTD_DATAFLOW_SANITIZER @@ -137,12 +152,39 @@ /* * For x86 ELF targets, add .note.gnu.property section for Intel CET in * assembly sources when CET is enabled. + * + * Additionally, any function that may be called indirectly must begin + * with ZSTD_CET_ENDBRANCH. */ #if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \ && defined(__has_include) # if __has_include(<cet.h>) # include <cet.h> +# define ZSTD_CET_ENDBRANCH _CET_ENDBR # endif #endif +#ifndef ZSTD_CET_ENDBRANCH +# define ZSTD_CET_ENDBRANCH +#endif + +/** + * ZSTD_IS_DETERMINISTIC_BUILD must be set to 0 if any compilation macro is + * active that impacts the compressed output. + * + * NOTE: ZSTD_MULTITHREAD is allowed to be set or unset. + */ +#if defined(ZSTD_CLEVEL_DEFAULT) \ + || defined(ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) +# define ZSTD_IS_DETERMINISTIC_BUILD 0 +#else +# define ZSTD_IS_DETERMINISTIC_BUILD 1 +#endif + #endif /* ZSTD_PORTABILITY_MACROS_H */ diff --git a/lib/common/threading.c b/lib/common/threading.c index 4f69f766286..1d5c97d3cea 100644 --- a/lib/common/threading.c +++ b/lib/common/threading.c @@ -23,8 +23,7 @@ int g_ZSTD_threading_useless_symbol; #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** - * Windows minimalist Pthread Wrapper, based on : - * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + * Windows minimalist Pthread Wrapper */ @@ -35,39 +34,94 @@ int g_ZSTD_threading_useless_symbol; /* === Implementation === */ +typedef struct { + void* (*start_routine)(void*); + void* arg; + int initialized; + ZSTD_pthread_cond_t initialized_cond; + ZSTD_pthread_mutex_t initialized_mutex; +} ZSTD_thread_params_t; + static unsigned __stdcall worker(void *arg) { - ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg; - thread->arg = thread->start_routine(thread->arg); + void* (*start_routine)(void*); + void* thread_arg; + + /* Initialize thread_arg and start_routine and signal the main thread that we don't need it + * to wait any longer. 
+ */ + { + ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg; + thread_arg = thread_param->arg; + start_routine = thread_param->start_routine; + + /* Signal main thread that we are running and do not depend on its memory anymore */ + ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex); + thread_param->initialized = 1; + ZSTD_pthread_cond_signal(&thread_param->initialized_cond); + ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex); + } + + start_routine(thread_arg); + return 0; } int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg) { + ZSTD_thread_params_t thread_param; (void)unused; - thread->arg = arg; - thread->start_routine = start_routine; - thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); - if (!thread->handle) + if (thread==NULL) return -1; + *thread = NULL; + + thread_param.start_routine = start_routine; + thread_param.arg = arg; + thread_param.initialized = 0; + + /* Setup thread initialization synchronization */ + if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) { + /* Should never happen on Windows */ + return -1; + } + if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) { + /* Should never happen on Windows */ + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); + return -1; + } + + /* Spawn thread */ + *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL); + if (*thread==NULL) { + ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); return errno; - else - return 0; + } + + /* Wait for thread to be initialized */ + ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex); + while(!thread_param.initialized) { + ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex); + } + ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex); + ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); + + return 0; } -int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) +int ZSTD_pthread_join(ZSTD_pthread_t thread) { DWORD result; - if (!thread.handle) return 0; + if (!thread) return 0; - result = WaitForSingleObject(thread.handle, INFINITE); - CloseHandle(thread.handle); + result = WaitForSingleObject(thread, INFINITE); + CloseHandle(thread); switch (result) { case WAIT_OBJECT_0: - if (value_ptr) *value_ptr = thread.arg; return 0; case WAIT_ABANDONED: return EINVAL; @@ -85,14 +139,23 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr) { + assert(mutex != NULL); *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t)); if (!*mutex) return 1; - return pthread_mutex_init(*mutex, attr); + { + int const ret = pthread_mutex_init(*mutex, attr); + if (ret != 0) { + ZSTD_free(*mutex); + *mutex = NULL; + } + return ret; + } } int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex) { + assert(mutex != NULL); if (!*mutex) return 0; { @@ -104,14 +167,23 @@ int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex) int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr) { + assert(cond != NULL); *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t)); if (!*cond) return 1; - return pthread_cond_init(*cond, attr); + { + int const ret = pthread_cond_init(*cond, attr); + if (ret != 0) { + ZSTD_free(*cond); + *cond = 
NULL; + } + return ret; + } } int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond) { + assert(cond != NULL); if (!*cond) return 0; { diff --git a/lib/common/threading.h b/lib/common/threading.h index fd0060d5aa2..e123cdf14a3 100644 --- a/lib/common/threading.h +++ b/lib/common/threading.h @@ -16,15 +16,10 @@ #include "debug.h" -#if defined (__cplusplus) -extern "C" { -#endif - #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** - * Windows minimalist Pthread Wrapper, based on : - * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + * Windows minimalist Pthread Wrapper */ #ifdef WINVER # undef WINVER @@ -62,22 +57,17 @@ extern "C" { #define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) /* ZSTD_pthread_create() and ZSTD_pthread_join() */ -typedef struct { - HANDLE handle; - void* (*start_routine)(void*); - void* arg; -} ZSTD_pthread_t; +typedef HANDLE ZSTD_pthread_t; int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg); -int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); +int ZSTD_pthread_join(ZSTD_pthread_t thread); /** * add here more wrappers as required */ - #elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ /* === POSIX Systems === */ # include @@ -99,7 +89,7 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); #define ZSTD_pthread_t pthread_t #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) -#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) +#define ZSTD_pthread_join(a) pthread_join((a),NULL) #else /* DEBUGLEVEL >= 1 */ @@ -124,7 +114,7 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond); #define ZSTD_pthread_t pthread_t #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) -#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) +#define ZSTD_pthread_join(a) pthread_join((a),NULL) #endif @@ -148,8 +138,5 @@ typedef int ZSTD_pthread_cond_t; #endif /* ZSTD_MULTITHREAD */ -#if defined (__cplusplus) -} -#endif #endif /* THREADING_H_938743 */ diff --git a/lib/common/xxhash.c b/lib/common/xxhash.c index d49497cf1cf..052cd522824 100644 --- a/lib/common/xxhash.c +++ b/lib/common/xxhash.c @@ -1,24 +1,18 @@ /* - * xxHash - Fast Hash algorithm - * Copyright (c) Yann Collet, Facebook, Inc. - * - * You can contact the author at : - * - xxHash homepage: http://www.xxhash.com - * - xxHash source repository : https://github.com/Cyan4973/xxHash + * xxHash - Extremely Fast Hash algorithm + * Copyright (c) Yann Collet - Meta Platforms, Inc * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. -*/ - - + */ /* * xxhash.c instantiates functions defined in xxhash.h */ -#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */ -#define XXH_IMPLEMENTATION /* access definitions */ +#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */ +#define XXH_IMPLEMENTATION /* access definitions */ #include "xxhash.h" diff --git a/lib/common/xxhash.h b/lib/common/xxhash.h index f87102a1d49..b6af402fdf4 100644 --- a/lib/common/xxhash.h +++ b/lib/common/xxhash.h @@ -1,17 +1,15 @@ /* - * xxHash - Fast Hash algorithm - * Copyright (c) Yann Collet, Facebook, Inc. 
- * - * You can contact the author at : - * - xxHash homepage: http://www.xxhash.com - * - xxHash source repository : https://github.com/Cyan4973/xxHash + * xxHash - Extremely Fast Hash algorithm + * Header File + * Copyright (c) Yann Collet - Meta Platforms, Inc * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. -*/ + */ +/* Local adaptations for Zstandard */ #ifndef XXH_NO_XXH3 # define XXH_NO_XXH3 @@ -24,70 +22,289 @@ /*! * @mainpage xxHash * + * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed + * limits. + * + * It is proposed in four flavors, in three families: + * 1. @ref XXH32_family + * - Classic 32-bit hash function. Simple, compact, and runs on almost all + * 32-bit and 64-bit systems. + * 2. @ref XXH64_family + * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most + * 64-bit systems (but _not_ 32-bit systems). + * 3. @ref XXH3_family + * - Modern 64-bit and 128-bit hash function family which features improved + * strength and performance across the board, especially on smaller data. + * It benefits greatly from SIMD and 64-bit without requiring it. + * + * Benchmarks + * --- + * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04. + * The open source benchmark program is compiled with clang v10.0 using -O3 flag. + * + * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity | + * | -------------------- | ------- | ----: | ---------------: | ------------------: | + * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 | + * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 | + * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 | + * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 | + * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 | + * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 | + * | RAM sequential read | | N/A | 28.0 GB/s | N/A | + * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 | + * | City64 | | 64 | 22.0 GB/s | 76.6 | + * | T1ha2 | | 64 | 22.0 GB/s | 99.0 | + * | City128 | | 128 | 21.7 GB/s | 57.7 | + * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 | + * | XXH64() | | 64 | 19.4 GB/s | 71.0 | + * | SpookyHash | | 64 | 19.3 GB/s | 53.2 | + * | Mum | | 64 | 18.0 GB/s | 67.0 | + * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 | + * | XXH32() | | 32 | 9.7 GB/s | 71.9 | + * | City32 | | 32 | 9.1 GB/s | 66.0 | + * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 | + * | Murmur3 | | 32 | 3.9 GB/s | 56.1 | + * | SipHash* | | 64 | 3.0 GB/s | 43.2 | + * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 | + * | HighwayHash | | 64 | 1.4 GB/s | 6.0 | + * | FNV64 | | 64 | 1.2 GB/s | 62.7 | + * | Blake2* | | 256 | 1.1 GB/s | 5.1 | + * | SHA1* | | 160 | 0.8 GB/s | 5.6 | + * | MD5* | | 128 | 0.6 GB/s | 7.8 | + * @note + * - Hashes which require a specific ISA extension are noted. SSE2 is also noted, + * even though it is mandatory on x64. + * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic + * by modern standards. + * - Small data velocity is a rough average of algorithm's efficiency for small + * data. For more accurate information, see the wiki. + * - More benchmarks and strength tests are found on the wiki: + * https://github.com/Cyan4973/xxHash/wiki + * + * Usage + * ------ + * All xxHash variants use a similar API. 
Changing the algorithm is a trivial + substitution. + * + * @pre + * For functions which take an input and length parameter, the following + * requirements are assumed: + * - The range from [`input`, `input + length`) is valid, readable memory. + * - The only exception is if the `length` is `0`, `input` may be `NULL`. + * - For C++, the objects must have the *TriviallyCopyable* property, as the + * functions access bytes directly as if it was an array of `unsigned char`. + * + * @anchor single_shot_example + * **Single Shot** + * + * These functions are stateless functions which hash a contiguous block of memory, + * immediately returning the result. They are the easiest and usually the fastest + * option. + * + * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits() + * + * @code{.c} + * #include <string.h> + * #include "xxhash.h" + * + * // Example for a function which hashes a null terminated string with XXH32(). + * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed) + * { + * // NULL pointers are only valid if the length is zero + * size_t length = (string == NULL) ? 0 : strlen(string); + * return XXH32(string, length, seed); + * } + * @endcode + * + * + * @anchor streaming_example + * **Streaming** + * + * These groups of functions allow incremental hashing of unknown size, even + * more than what would fit in a size_t. + * + * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset() + * + * @code{.c} + * #include <stdio.h> + * #include <assert.h> + * #include "xxhash.h" + * // Example for a function which hashes a FILE incrementally with XXH3_64bits(). + * XXH64_hash_t hashFile(FILE* f) + * { + * // Allocate a state struct. Do not just use malloc() or new. + * XXH3_state_t* state = XXH3_createState(); + * assert(state != NULL && "Out of memory!"); + * // Reset the state to start a new hashing session. + * XXH3_64bits_reset(state); + * char buffer[4096]; + * size_t count; + * // Read the file in chunks + * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) { + * // Run update() as many times as necessary to process the data + * XXH3_64bits_update(state, buffer, count); + * } + * // Retrieve the finalized hash. This will not change the state. + * XXH64_hash_t result = XXH3_64bits_digest(state); + * // Free the state. Do not use free(). + * XXH3_freeState(state); + * return result; + * } + * @endcode + * + * Streaming functions generate the xxHash value from an incremental input. + * This method is slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * An XXH state must first be allocated using `XXH*_createState()`. + * + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. + * + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. + * + * When done, release the state using `XXH*_freeState()`. + * + * + * @anchor canonical_representation_example + * **Canonical Representation** + * + * The default return values from XXH functions are unsigned 32, 64 and 128 bit + * integers. 
+ * This is the simplest and fastest format for further post-processing. + * + * However, this leaves open the question of what is the order on the byte level, + * since little and big endian conventions will store the same number differently. + * + * The canonical representation settles this issue by mandating big-endian + * convention, the same convention as human-readable numbers (large digits first). + * + * When writing hash values to storage, sending them over a network, or printing + * them, it's highly recommended to use the canonical representation to ensure + * portability across a wider range of systems, present and future. + * + * The following functions allow transformation of hash values to and from + * canonical format. + * + * XXH32_canonicalFromHash(), XXH32_hashFromCanonical(), + * XXH64_canonicalFromHash(), XXH64_hashFromCanonical(), + * XXH128_canonicalFromHash(), XXH128_hashFromCanonical(), + * + * @code{.c} + * #include <stdio.h> + * #include "xxhash.h" + * + * // Example for a function which prints XXH32_hash_t in human readable format + * void printXxh32(XXH32_hash_t hash) + * { + * XXH32_canonical_t cano; + * XXH32_canonicalFromHash(&cano, hash); + * size_t i; + * for(i = 0; i < sizeof(cano.digest); ++i) { + * printf("%02x", cano.digest[i]); + * } + * printf("\n"); + * } + * + * // Example for a function which converts XXH32_canonical_t to XXH32_hash_t + * XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano) + * { + * XXH32_hash_t hash = XXH32_hashFromCanonical(&cano); + * return hash; + * } + * @endcode + * + * * @file xxhash.h * xxHash prototypes and implementation */ -/* TODO: update */ -/* Notice extracted from xxHash homepage: -xxHash is an extremely fast hash algorithm, running at RAM speed limits. -It also successfully passes all tests from the SMHasher suite. -Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) -Name Speed Q.Score Author -xxHash 5.4 GB/s 10 -CrapWow 3.2 GB/s 2 Andrew -MurmurHash 3a 2.7 GB/s 10 Austin Appleby -SpookyHash 2.0 GB/s 10 Bob Jenkins -SBox 1.4 GB/s 9 Bret Mulvey -Lookup3 1.2 GB/s 9 Bob Jenkins -SuperFastHash 1.2 GB/s 1 Paul Hsieh -CityHash64 1.05 GB/s 10 Pike & Alakuijala -FNV 0.55 GB/s 5 Fowler, Noll, Vo -CRC32 0.43 GB/s 9 -MD5-32 0.33 GB/s 10 Ronald L. Rivest -SHA1-32 0.28 GB/s 10 -Q.Score is a measure of quality of the hash function. -It depends on successfully passing SMHasher test set. -10 is a perfect score. -Note: SMHasher's CRC32 implementation is not the fastest one. -Other speed-oriented implementations can be faster, -especially in combination with PCLMUL instruction: -https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735 -A 64-bit version, named XXH64, is available since r35. -It offers much better speed, but for 64-bit applications only. -Name Speed on 64 bits Speed on 32 bits -XXH64 13.8 GB/s 1.9 GB/s -XXH32 6.8 GB/s 6.0 GB/s -*/ -#if defined (__cplusplus) -extern "C" { -#endif /* **************************** * INLINE mode ******************************/ /*! - * XXH_INLINE_ALL (and XXH_PRIVATE_API) + * @defgroup public Public API + * Contains details on the public xxHash functions. + * @{ + */ +#ifdef XXH_DOXYGEN +/*! + * @brief Gives access to internal state declaration, required for static allocation. + * + * Incompatible with dynamic linking, due to risks of ABI changes. 
+ * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #include "xxhash.h" + * @endcode + */ +# define XXH_STATIC_LINKING_ONLY +/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */ + +/*! + * @brief Gives access to internal definitions. + * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #define XXH_IMPLEMENTATION + * #include "xxhash.h" + * @endcode + */ +# define XXH_IMPLEMENTATION +/* Do not undef XXH_IMPLEMENTATION for Doxygen */ + +/*! + * @brief Exposes the implementation and marks all functions as `inline`. + * * Use these build macros to inline xxhash into the target unit. * Inlining improves performance on small inputs, especially when the length is * expressed as a compile-time constant: * - * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html * * It also keeps xxHash symbols private to the unit, so they are not exported. * * Usage: + * @code{.c} * #define XXH_INLINE_ALL * #include "xxhash.h" - * + * @endcode * Do not compile and link xxhash.o as a separate object, as it is not useful. */ +# define XXH_INLINE_ALL +# undef XXH_INLINE_ALL +/*! + * @brief Exposes the implementation without marking functions as inline. + */ +# define XXH_PRIVATE_API +# undef XXH_PRIVATE_API +/*! + * @brief Emulate a namespace by transparently prefixing all symbols. + * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE + * (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ +# define XXH_NAMESPACE /* YOUR NAME HERE */ +# undef XXH_NAMESPACE +#endif + #if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ && !defined(XXH_INLINE_ALL_31684351384) /* this section should be traversed only once */ @@ -202,21 +419,13 @@ extern "C" { # undef XXHASH_H_STATIC_13879238742 #endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ - - /* **************************************************************** * Stable API *****************************************************************/ #ifndef XXHASH_H_5627135585666179 #define XXHASH_H_5627135585666179 1 - -/*! - * @defgroup public Public API - * Contains details on the public xxHash functions. - * @{ - */ -/* specific declaration modes for Windows */ +/*! @brief Marks a global symbol. */ #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) # if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) # ifdef XXH_EXPORT @@ -229,24 +438,6 @@ extern "C" { # endif #endif -#ifdef XXH_DOXYGEN -/*! - * @brief Emulate a namespace by transparently prefixing all symbols. - * - * If you want to include _and expose_ xxHash functions from within your own - * library, but also want to avoid symbol collisions with other libraries which - * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix - * any public symbol from xxhash library with the value of XXH_NAMESPACE - * (therefore, avoid empty or numeric values). 
- * - * Note that no change is required within the calling program as long as it - * includes `xxhash.h`: Regular symbol names will be automatically translated - * by this header. - */ -# define XXH_NAMESPACE /* YOUR NAME HERE */ -# undef XXH_NAMESPACE -#endif - #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) @@ -306,30 +497,70 @@ extern "C" { #endif +/* ************************************* +* Compiler specifics +***************************************/ + +/* specific declaration modes for Windows */ +#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) +# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) +# ifdef XXH_EXPORT +# define XXH_PUBLIC_API __declspec(dllexport) +# elif XXH_IMPORT +# define XXH_PUBLIC_API __declspec(dllimport) +# endif +# else +# define XXH_PUBLIC_API /* do nothing */ +# endif +#endif + +#if defined (__GNUC__) +# define XXH_CONSTF __attribute__((const)) +# define XXH_PUREF __attribute__((pure)) +# define XXH_MALLOCF __attribute__((malloc)) +#else +# define XXH_CONSTF /* disable */ +# define XXH_PUREF +# define XXH_MALLOCF +#endif + /* ************************************* * Version ***************************************/ #define XXH_VERSION_MAJOR 0 #define XXH_VERSION_MINOR 8 -#define XXH_VERSION_RELEASE 1 +#define XXH_VERSION_RELEASE 2 +/*! @brief Version number, encoded as two digits each */ #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +#if defined (__cplusplus) extern "C" { #endif /*! * @brief Obtains the xxHash version. * * This is mostly useful when xxHash is compiled as a shared library, * since the returned value comes from the library, as opposed to header file. * - * @return `XXH_VERSION_NUMBER` of the invoked library. + * @return @ref XXH_VERSION_NUMBER of the invoked library. */ -XXH_PUBLIC_API unsigned XXH_versionNumber (void); +XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void); +#if defined (__cplusplus) +} +#endif /* **************************** * Common basic types ******************************/ #include <stddef.h> /* size_t */ -typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; +/*! + * @brief Exit code for the streaming API. + */ +typedef enum { + XXH_OK = 0, /*!< OK */ + XXH_ERROR /*!< Error */ +} XXH_errorcode; /*-********************************************************************** @@ -346,44 +577,48 @@ typedef uint32_t XXH32_hash_t; #elif !defined (__VMS) \ && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> +# ifdef _AIX +# include <inttypes.h> +# else +# include <stdint.h> +# endif typedef uint32_t XXH32_hash_t; #else # include <limits.h> # if UINT_MAX == 0xFFFFFFFFUL typedef unsigned int XXH32_hash_t; +# elif ULONG_MAX == 0xFFFFFFFFUL + typedef unsigned long XXH32_hash_t; # else -# if ULONG_MAX == 0xFFFFFFFFUL - typedef unsigned long XXH32_hash_t; -# else -# error "unsupported platform: need a 32-bit type" -# endif +# error "unsupported platform: need a 32-bit type" # endif #endif +#if defined (__cplusplus) +extern "C" { +#endif + /*! * @} * - * @defgroup xxh32_family XXH32 family + * @defgroup XXH32_family XXH32 family * @ingroup public * Contains functions used in the classic 32-bit xxHash algorithm. * * @note * XXH32 is useful for older platforms, with no or poor 64-bit performance. - * Note that @ref xxh3_family provides competitive speed - * for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results. 
+ * Note that the @ref XXH3_family provides competitive speed for both 32-bit + * and 64-bit systems, and offers true 64/128 bit hash results. * - * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families - * @see @ref xxh32_impl for implementation details + * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families + * @see @ref XXH32_impl for implementation details * @{ */ /*! * @brief Calculates the 32-bit hash of @p input using xxHash32. * - * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s - * * @param input The block of data to be hashed, at least @p length bytes in size. * @param length The length of @p input, in bytes. * @param seed The 32-bit seed to alter the hash's output predictably. @@ -393,66 +628,13 @@ typedef uint32_t XXH32_hash_t; * readable, contiguous memory. However, if @p length is `0`, @p input may be * `NULL`. In C++, this also must be *TriviallyCopyable*. * - * @return The calculated 32-bit hash value. - * - * @see - * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): - * Direct equivalents for the other variants of xxHash. - * @see - * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. - */ -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); - -/*! - * Streaming functions generate the xxHash value from an incremental input. - * This method is slower than single-call functions, due to state management. - * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. - * - * An XXH state must first be allocated using `XXH*_createState()`. - * - * Start a new hash by initializing the state with a seed using `XXH*_reset()`. - * - * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. - * - * The function returns an error code, with 0 meaning OK, and any other value - * meaning there is an error. - * - * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. - * This function returns the nn-bits hash as an int or long long. - * - * It's still possible to continue inserting input into the hash state after a - * digest, and generate new hash values later on by invoking `XXH*_digest()`. - * - * When done, release the state using `XXH*_freeState()`. - * - * Example code for incrementally hashing a file: - * @code{.c} - * #include <stdio.h> - * #include <assert.h> - * #define BUFFER_SIZE 256 + * @return The calculated 32-bit xxHash32 value. * - * // Note: XXH64 and XXH3 use the same interface. - * XXH32_hash_t - * hashFile(FILE* stream) - * { - * XXH32_state_t* state; - * unsigned char buf[BUFFER_SIZE]; - * size_t amt; - * XXH32_hash_t hash; - * - * state = XXH32_createState(); // Create a state - * assert(state != NULL); // Error check here - * XXH32_reset(state, 0xbaad5eed); // Reset state with our seed - * while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) { - * XXH32_update(state, buf, amt); // Hash the file in chunks - * } - * hash = XXH32_digest(state); // Finalize the hash - * XXH32_freeState(state); // Clean up - * return hash; - * } - * @endcode + * @see @ref single_shot_example "Single Shot Example" for an example. */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); +#ifndef XXH_NO_STREAM /*! * @typedef struct XXH32_state_s XXH32_state_t * @brief The opaque state struct for the XXH32 streaming API. @@ -464,16 +646,21 @@ typedef struct XXH32_state_s XXH32_state_t; /*! * @brief Allocates an @ref XXH32_state_t. * - * Must be freed with XXH32_freeState(). 
- * @return An allocated XXH32_state_t on success, `NULL` on failure. + * @return An allocated pointer of @ref XXH32_state_t on success. + * @return `NULL` on failure. + * + * @note Must be freed with XXH32_freeState(). */ -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); /*! * @brief Frees an @ref XXH32_state_t. * - * Must be allocated with XXH32_createState(). * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). - * @return XXH_OK. + * + * @return @ref XXH_OK. + * + * @note @p statePtr must be allocated with XXH32_createState(). + * */ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); /*! @@ -489,23 +676,22 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_ /*! * @brief Resets an @ref XXH32_state_t to begin a new hash. * - * This function resets and seeds a state. Call it before @ref XXH32_update(). - * * @param statePtr The state struct to reset. * @param seed The 32-bit seed to alter the hash result predictably. * * @pre * @p statePtr must not be `NULL`. * - * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note This function resets and seeds a state. Call it before @ref XXH32_update(). */ XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed); /*! * @brief Consumes a block of @p input to an @ref XXH32_state_t. * - * Call this to incrementally consume blocks of data. - * * @param statePtr The state struct to update. * @param input The block of data to be hashed, at least @p length bytes in size. * @param length The length of @p input, in bytes. @@ -517,47 +703,32 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t * readable, contiguous memory. However, if @p length is `0`, @p input may be * `NULL`. In C++, this also must be *TriviallyCopyable*. * - * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note Call this to incrementally consume blocks of data. */ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); /*! * @brief Returns the calculated hash value from an @ref XXH32_state_t. * - * @note - * Calling XXH32_digest() will not affect @p statePtr, so you can update, - * digest, and update again. - * * @param statePtr The state struct to calculate the hash from. * * @pre * @p statePtr must not be `NULL`. * - * @return The calculated xxHash32 value from that state. + * @return The calculated 32-bit xxHash32 value from that state. + * + * @note + * Calling XXH32_digest() will not affect @p statePtr, so you can update, + * digest, and update again. */ -XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ /******* Canonical representation *******/ -/* - * The default return values from XXH functions are unsigned 32 and 64 bit - * integers. - * This the simplest and fastest format for further post-processing. - * - * However, this leaves open the question of what is the order on the byte level, - * since little and big endian conventions will store the same number differently. 
- * - * The canonical representation settles this issue by mandating big-endian - * convention, the same convention as human-readable numbers (large digits first). - * - * When writing hash values to storage, sending them over a network, or printing - * them, it's highly recommended to use the canonical representation to ensure - * portability across a wider range of systems, present and future. - * - * The following functions allow transformation of hash values to and from - * canonical format. - */ - /*! * @brief Canonical (big endian) representation of @ref XXH32_hash_t. */ @@ -568,11 +739,13 @@ typedef struct { /*! * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t. * - * @param dst The @ref XXH32_canonical_t pointer to be stored to. + * @param dst The @ref XXH32_canonical_t pointer to be stored to. * @param hash The @ref XXH32_hash_t to be converted. * * @pre * @p dst must not be `NULL`. + * + * @see @ref canonical_representation_example "Canonical Representation Example" */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); @@ -585,49 +758,83 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t * @p src must not be `NULL`. * * @return The converted hash. + * + * @see @ref canonical_representation_example "Canonical Representation Example" */ -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); +/*! @cond Doxygen ignores this part */ #ifdef __has_attribute # define XXH_HAS_ATTRIBUTE(x) __has_attribute(x) #else # define XXH_HAS_ATTRIBUTE(x) 0 #endif +/*! @endcond */ +/*! @cond Doxygen ignores this part */ +/* + * C23 __STDC_VERSION__ number hasn't been specified yet. For now + * leave as `201711L` (C17 + 1). + * TODO: Update to correct value when it's been specified. + */ +#define XXH_C23_VN 201711L +/*! @endcond */ + +/*! @cond Doxygen ignores this part */ /* C-language Attributes are added in C23. */ -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute) +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute) # define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) #else # define XXH_HAS_C_ATTRIBUTE(x) 0 #endif +/*! @endcond */ +/*! @cond Doxygen ignores this part */ #if defined(__cplusplus) && defined(__has_cpp_attribute) # define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) #else # define XXH_HAS_CPP_ATTRIBUTE(x) 0 #endif +/*! @endcond */ +/*! @cond Doxygen ignores this part */ /* -Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute -introduced in CPP17 and C23. -CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough -C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough -*/ -#if XXH_HAS_C_ATTRIBUTE(x) -# define XXH_FALLTHROUGH [[fallthrough]] -#elif XXH_HAS_CPP_ATTRIBUTE(x) + * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute + * introduced in CPP17 and C23. 
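XXH_FALLTHROUGH, defined just below, silences -Wimplicit-fallthrough-style warnings at deliberate fall-through points. A usage sketch; the switch itself is illustrative:

/* Sketch: annotating intentional fallthrough between switch cases. */
static int drain(int remaining, int acc)
{
    switch (remaining) {
    case 2:
        acc += 2;
        XXH_FALLTHROUGH;  /* [[fallthrough]], __attribute__((__fallthrough__)), or a bare ';' */
    case 1:
        acc += 1;
        XXH_FALLTHROUGH;
    case 0:
    default:
        return acc;
    }
}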
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough + * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough + */ +#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough) # define XXH_FALLTHROUGH [[fallthrough]] #elif XXH_HAS_ATTRIBUTE(__fallthrough__) -# define XXH_FALLTHROUGH __attribute__ ((fallthrough)) +# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__)) #else -# define XXH_FALLTHROUGH +# define XXH_FALLTHROUGH /* fallthrough */ #endif +/*! @endcond */ -/*! - * @} - * @ingroup public - * @{ +/*! @cond Doxygen ignores this part */ +/* + * Define XXH_NOESCAPE for annotated pointers in public API. + * https://clang.llvm.org/docs/AttributeReference.html#noescape + * As of writing this, only supported by clang. + */ +#if XXH_HAS_ATTRIBUTE(noescape) +# define XXH_NOESCAPE __attribute__((noescape)) +#else +# define XXH_NOESCAPE +#endif +/*! @endcond */ + +#if defined (__cplusplus) +} /* end of extern "C" */ +#endif + +/*! + * @} + * @ingroup public + * @{ */ #ifndef XXH_NO_LONG_LONG @@ -644,7 +851,11 @@ typedef uint64_t XXH64_hash_t; #elif !defined (__VMS) \ && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> +# ifdef _AIX +# include <inttypes.h> +# else +# include <stdint.h> +# endif typedef uint64_t XXH64_hash_t; #else # include <limits.h> @@ -657,10 +868,13 @@ typedef uint64_t XXH64_hash_t; # endif #endif +#if defined (__cplusplus) +extern "C" { +#endif /*! * @} * - * @defgroup xxh64_family XXH64 family + * @defgroup XXH64_family XXH64 family * @ingroup public * @{ * Contains functions used in the classic 64-bit xxHash algorithm. @@ -671,13 +885,9 @@ typedef uint64_t XXH64_hash_t; * It provides better speed for systems with vector processing capabilities. */ - /*! * @brief Calculates the 64-bit hash of @p input using xxHash64. * - * This function usually runs faster on 64-bit systems, but slower on 32-bit - * systems (see benchmark). - * * @param input The block of data to be hashed, at least @p length bytes in size. * @param length The length of @p input, in bytes. * @param seed The 64-bit seed to alter the hash's output predictably. @@ -687,41 +897,145 @@ typedef uint64_t XXH64_hash_t; * readable, contiguous memory. However, if @p length is `0`, @p input may be * `NULL`. In C++, this also must be *TriviallyCopyable*. * - * @return The calculated 64-bit hash. + * @return The calculated 64-bit xxHash64 value. * - * @see - * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): - * Direct equivalents for the other variants of xxHash. - * @see - * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. + * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); /******* Streaming *******/ +#ifndef XXH_NO_STREAM /*! * @brief The opaque state struct for the XXH64 streaming API. * * @see XXH64_state_s for details. */ typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + +/*! + * @brief Allocates an @ref XXH64_state_t. + * + * @return An allocated pointer of @ref XXH64_state_t on success. + * @return `NULL` on failure. + * + * @note Must be freed with XXH64_freeState(). + */ +XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); + +/*! + * @brief Frees an @ref XXH64_state_t.
+ * + * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState(). + * + * @return @ref XXH_OK. + * + * @note @p statePtr must be allocated with XXH64_createState(). + */ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, XXH64_hash_t seed); -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); +/*! + * @brief Copies one @ref XXH64_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state); +/*! + * @brief Resets an @ref XXH64_state_t to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note This function resets and seeds a state. Call it before @ref XXH64_update(). + */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH64_state_t. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note Call this to incrementally consume blocks of data. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH64_state_t. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated 64-bit xxHash64 value from that state. + * + * @note + * Calling XXH64_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ /******* Canonical representation *******/ + +/*! + * @brief Canonical (big endian) representation of @ref XXH64_hash_t. + */ typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); + +/*! + * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. + * + * @param dst The @ref XXH64_canonical_t pointer to be stored to. + * @param hash The @ref XXH64_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. 
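+ * + * As an illustration, a minimal sketch of serializing a hash in canonical (big endian) form; `buf`, `bufSize` and the open `FILE* f` are hypothetical: + * @code{.c} + * XXH64_hash_t hash = XXH64(buf, bufSize, 0); + * XXH64_canonical_t canon; + * XXH64_canonicalFromHash(&canon, hash); + * fwrite(canon.digest, 1, sizeof(canon.digest), f); // identical bytes on every platform + * @endcode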
+ * + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash); + +/*! + * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. + * + * @param src The @ref XXH64_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + * + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src); #ifndef XXH_NO_XXH3 + /*! * @} * ************************************************************************ - * @defgroup xxh3_family XXH3 family + * @defgroup XXH3_family XXH3 family * @ingroup public * @{ * @@ -741,16 +1055,26 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src * * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, * but does not require it. - * Any 32-bit and 64-bit targets that can run XXH32 smoothly - * can run XXH3 at competitive speeds, even without vector support. - * Further details are explained in the implementation. - * - * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8, - * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro. + * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3 + * at competitive speeds, even without vector support. Further details are + * explained in the implementation. + * + * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD + * implementations for many common platforms: + * - AVX512 + * - AVX2 + * - SSE2 + * - ARM NEON + * - WebAssembly SIMD128 + * - POWER8 VSX + * - s390x ZVector + * This can be controlled via the @ref XXH_VECTOR macro, but it automatically + * selects the best version according to predefined macros. For the x86 family, an + * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c. * * XXH3 implementation is portable: * it has a generic C90 formulation that can be compiled on any platform, - * all implementations generage exactly the same hash value on all platforms. + * all implementations generate exactly the same hash value on all platforms. * Starting from v0.8.0, it's also labelled "stable", meaning that * any future version will also generate the same hash value. * @@ -762,24 +1086,59 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src * * The API supports one-shot hashing, streaming mode, and custom secrets. */ - /*-********************************************************************** * XXH3 64-bit variant ************************************************************************/ -/* XXH3_64bits(): - * default 64-bit variant, using default secret and default seed of 0. - * It's the fastest variant. */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len); +/*! + * @brief Calculates 64-bit unseeded variant of XXH3 hash of @p input. + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 64-bit XXH3 hash value. 
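+ * + * For example, a minimal one-shot sketch, where `buf` and `bufSize` stand in for the application's data: + * @code{.c} + * XXH64_hash_t hash = XXH3_64bits(buf, bufSize); + * @endcode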
+ * + * @note + * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`, however + * it may have slightly better performance due to constant propagation of the + * defaults. + * + * @see + * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants + * @see @ref single_shot_example "Single Shot Example" for an example. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length); -/* - * XXH3_64bits_withSeed(): - * This variant generates a custom secret on the fly - * based on default secret altered using the `seed` value. +/*! + * @brief Calculates 64-bit seeded variant of XXH3 hash of @p input. + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 64-bit XXH3 hash value. + * + * @note + * seed == 0 produces the same results as @ref XXH3_64bits(). + * + * This variant generates a custom secret on the fly based on default secret + * altered using the @p seed value. + * + * While this operation is decently fast, note that it's not completely free. - * Note: seed==0 produces the same results as XXH3_64bits(). + * + * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); /*! * The bare minimum size for a custom secret. @@ -790,27 +1149,43 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, X */ #define XXH3_SECRET_SIZE_MIN 136 -/* - * XXH3_64bits_withSecret(): +/*! + * @brief Calculates 64-bit variant of XXH3 with a custom "secret". + * + * @param data The block of data to be hashed, at least @p len bytes in size. + * @param len The length of @p data, in bytes. + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * + * @return The calculated 64-bit XXH3 hash value. + * + * @pre + * The memory between @p data and @p data + @p len must be valid, + * readable, contiguous memory. However, if @p len is `0`, @p data may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * * It's possible to provide any blob of bytes as a "secret" to generate the hash. * This makes it more difficult for an external actor to prepare an intentional collision. - * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). + * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN). * However, the quality of the secret impacts the dispersion of the hash algorithm. * Therefore, the secret _must_ look like a bunch of random bytes. * Avoid "trivial" or structured data such as repeated sequences or a text document. * Whenever in doubt about the "randomness" of the blob of bytes, - * consider employing "XXH3_generateSecret()" instead (see below). + * consider employing @ref XXH3_generateSecret() instead (see below). * It will generate a proper high entropy secret derived from the blob of bytes.
* Another advantage of using XXH3_generateSecret() is that * it guarantees that all bits within the initial blob of bytes * will impact every bit of the output. * This is not necessarily the case when using the blob of bytes directly * because, when hashing _small_ inputs, only a portion of the secret is employed. + * + * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); /******* Streaming *******/ +#ifndef XXH_NO_STREAM /* * Streaming requires state maintenance. * This operation costs memory and CPU. @@ -819,40 +1194,124 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, */ /*! - * @brief The state struct for the XXH3 streaming API. + * @brief The opaque state struct for the XXH3 streaming API. * * @see XXH3_state_s for details. */ typedef struct XXH3_state_s XXH3_state_t; -XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void); +XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void); XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); -XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state); -/* - * XXH3_64bits_reset(): - * Initialize with default parameters. - * digest will be equivalent to `XXH3_64bits()`. +/*! + * @brief Copies one @ref XXH3_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr); -/* - * XXH3_64bits_reset_withSeed(): - * Generate a custom secret from `seed`, and store it into `statePtr`. - * digest will be equivalent to `XXH3_64bits_withSeed()`. +XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state); + +/*! + * @brief Resets an @ref XXH3_state_t to begin a new hash. + * + * @param statePtr The state struct to reset. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note + * - This function resets `statePtr` and generates a secret with default parameters. + * - Call this function before @ref XXH3_64bits_update(). + * - Digest will be equivalent to `XXH3_64bits()`. + * + */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed); -/* - * XXH3_64bits_reset_withSecret(): - * `secret` is referenced, it _must outlive_ the hash streaming session. - * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`, +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr); + +/*! + * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note + * - This function resets `statePtr` and generates a secret from `seed`. + * - Call this function before @ref XXH3_64bits_update(). + * - Digest will be equivalent to `XXH3_64bits_withSeed()`.
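+ * + * A minimal streaming sketch (error checks omitted; `read_chunk()`, `chunk`, `chunkSize` and `seed` are hypothetical): + * @code{.c} + * XXH3_state_t* state = XXH3_createState(); + * XXH3_64bits_reset_withSeed(state, seed); + * while ((chunkSize = read_chunk(chunk, sizeof(chunk))) > 0) + *     XXH3_64bits_update(state, chunk, chunkSize); + * XXH64_hash_t hash = XXH3_64bits_digest(state); + * XXH3_freeState(state); + * @endcode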
+ * + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed); + +/*! + * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note + * `secret` is referenced, it _must outlive_ the hash streaming session. + * + * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN, * and the quality of produced hash values depends on secret's entropy * (secret's content should look like a bunch of random bytes). * When in doubt about the randomness of a candidate `secret`, * consider employing `XXH3_generateSecret()` instead (see below). */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize); +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize); -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr); +/*! + * @brief Consumes a block of @p input to an @ref XXH3_state_t. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note Call this to incrementally consume blocks of data. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated XXH3 64-bit hash value from that state. + * + * @note + * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ /* note : canonical representation of XXH3 is the same as XXH64 * since they both produce XXH64_hash_t values */ @@ -873,11 +1332,76 @@ typedef struct { XXH64_hash_t high64; /*!< `value >> 64` */ } XXH128_hash_t; -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len); -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); +/*! + * @brief Calculates 128-bit unseeded variant of XXH3 of @p data. + * + * @param data The block of data to be hashed, at least @p len bytes in size. + * @param len The length of @p data, in bytes. + * + * @return The calculated 128-bit variant of XXH3 value.
+ * + * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead + * for shorter inputs. + * + * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`, however + * it may have slightly better performance due to constant propagation of the + * defaults. + * + * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants + * @see @ref single_shot_example "Single Shot Example" for an example. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len); +/*! @brief Calculates 128-bit seeded variant of XXH3 hash of @p data. + * + * @param data The block of data to be hashed, at least @p len bytes in size. + * @param len The length of @p data, in bytes. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @return The calculated 128-bit variant of XXH3 value. + * + * @note + * seed == 0 produces the same results as @ref XXH3_128bits(). + * + * This variant generates a custom secret on the fly based on default secret + * altered using the @p seed value. + * + * While this operation is decently fast, note that it's not completely free. + * + * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants + * @see @ref single_shot_example "Single Shot Example" for an example. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed); +/*! + * @brief Calculates 128-bit variant of XXH3 with a custom "secret". + * + * @param data The block of data to be hashed, at least @p len bytes in size. + * @param len The length of @p data, in bytes. + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * + * @return The calculated 128-bit variant of XXH3 value. + * + * It's possible to provide any blob of bytes as a "secret" to generate the hash. + * This makes it more difficult for an external actor to prepare an intentional collision. + * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN). + * However, the quality of the secret impacts the dispersion of the hash algorithm. + * Therefore, the secret _must_ look like a bunch of random bytes. + * Avoid "trivial" or structured data such as repeated sequences or a text document. + * Whenever in doubt about the "randomness" of the blob of bytes, + * consider employing @ref XXH3_generateSecret() instead (see below). + * It will generate a proper high entropy secret derived from the blob of bytes. + * Another advantage of using XXH3_generateSecret() is that + * it guarantees that all bits within the initial blob of bytes + * will impact every bit of the output. + * This is not necessarily the case when using the blob of bytes directly + * because, when hashing _small_ inputs, only a portion of the secret is employed. + * + * @see @ref single_shot_example "Single Shot Example" for an example. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); /******* Streaming *******/ +#ifndef XXH_NO_STREAM /* * Streaming requires state maintenance. * This operation costs memory and CPU. @@ -890,42 +1414,171 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t le * All reset and streaming functions have same meaning as their 64-bit counterpart.
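+ * + * As a sketch (with hypothetical `buf`/`bufSize`), the streaming digest produces the same value as the one-shot call: + * @code{.c} + * XXH3_state_t* s = XXH3_createState(); + * XXH3_128bits_reset(s); + * XXH3_128bits_update(s, buf, bufSize); + * XXH128_hash_t h = XXH3_128bits_digest(s); // same value as XXH3_128bits(buf, bufSize) + * XXH3_freeState(s); + * @endcode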
*/ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr); -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed); -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize); +/*! + * @brief Resets an @ref XXH3_state_t to begin a new hash. + * + * @param statePtr The state struct to reset. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note + * - This function resets `statePtr` and generates a secret with default parameters. + * - Call it before @ref XXH3_128bits_update(). + * - Digest will be equivalent to `XXH3_128bits()`. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr); -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr); +/*! + * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note + * - This function resets `statePtr` and generates a secret from `seed`. + * - Call it before @ref XXH3_128bits_update(). + * - Digest will be equivalent to `XXH3_128bits_withSeed()`. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed); +/*! + * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * `secret` is referenced, it _must outlive_ the hash streaming session. + * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN, + * and the quality of produced hash values depends on secret's entropy + * (secret's content should look like a bunch of random bytes). + * When in doubt about the randomness of a candidate `secret`, + * consider employing `XXH3_generateSecret()` instead (see below). + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize); + +/*! + * @brief Consumes a block of @p input to an @ref XXH3_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated XXH3 128-bit hash value from that state. + * + * @note + * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ /* Following helper functions make it possible to compare XXH128_hast_t values. * Since XXH128_hash_t is a structure, this capability is not offered by the language. * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */ /*! - * XXH128_isEqual(): - * Return: 1 if `h1` and `h2` are equal, 0 if they are not. + * @brief Check equality of two XXH128_hash_t values + * + * @param h1 The 128-bit hash value. + * @param h2 Another 128-bit hash value. + * + * @return `1` if `h1` and `h2` are equal. + * @return `0` if they are not. */ -XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2); +XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2); /*! - * XXH128_cmp(): + * @brief Compares two @ref XXH128_hash_t * * This comparator is compatible with stdlib's `qsort()`/`bsearch()`. * - * return: >0 if *h128_1 > *h128_2 - * =0 if *h128_1 == *h128_2 - * <0 if *h128_1 < *h128_2 + * @param h128_1 Left-hand side value + * @param h128_2 Right-hand side value + * + * @return >0 if @p h128_1 > @p h128_2 + * @return =0 if @p h128_1 == @p h128_2 + * @return <0 if @p h128_1 < @p h128_2 */ -XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2); +XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2); /******* Canonical representation *******/ typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t; -XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash); -XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src); + + +/*! + * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t. + * + * @param dst The @ref XXH128_canonical_t pointer to be stored to. + * @param hash The @ref XXH128_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash); + +/*! + * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t. + * + * @param src The @ref XXH128_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src); #endif /* !XXH_NO_XXH3 */ + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + #endif /* XXH_NO_LONG_LONG */ /*! @@ -996,7 +1649,6 @@ struct XXH64_state_s { XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */ }; /* typedef'd to XXH64_state_t */ - #ifndef XXH_NO_XXH3 #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */ @@ -1032,6 +1684,7 @@ struct XXH64_state_s { #define XXH3_INTERNALBUFFER_SIZE 256 /*! + * @internal * @brief Default size of the secret buffer (and @ref XXH3_kSecret). 
* * This is the size used in @ref XXH3_kSecret and the seeded functions. @@ -1064,7 +1717,7 @@ struct XXH64_state_s { */ struct XXH3_state_s { XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]); - /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */ + /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */ XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]); /*!< Used to store a custom secret generated from a seed. */ XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]); @@ -1104,69 +1757,152 @@ struct XXH3_state_s { * Note that this doesn't prepare the state for a streaming operation, * it's still necessary to use XXH3_NNbits_reset*() afterwards. */ -#define XXH3_INITSTATE(XXH3_state_ptr) { (XXH3_state_ptr)->seed = 0; } +#define XXH3_INITSTATE(XXH3_state_ptr) \ + do { \ + XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \ + tmp_xxh3_state_ptr->seed = 0; \ + tmp_xxh3_state_ptr->extSecret = NULL; \ + } while(0) -/* XXH128() : - * simple alias to pre-selected XXH3_128bits variant +#if defined (__cplusplus) +extern "C" { +#endif + +/*! + * @brief Calculates the 128-bit hash of @p data using XXH3. + * + * @param data The block of data to be hashed, at least @p len bytes in size. + * @param len The length of @p data, in bytes. + * @param seed The 64-bit seed to alter the hash's output predictably. + * + * @pre + * The memory between @p data and @p data + @p len must be valid, + * readable, contiguous memory. However, if @p len is `0`, @p data may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 128-bit XXH3 value. + * + * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed); /* === Experimental API === */ /* Symbols defined below must be considered tied to a specific library version. */ -/* - * XXH3_generateSecret(): +/*! + * @brief Derive a high-entropy secret from any user-defined content, named customSeed. + * + * @param secretBuffer A writable buffer for derived high-entropy secret data. + * @param secretSize Size of secretBuffer, in bytes. Must be >= @ref XXH3_SECRET_SIZE_MIN. + * @param customSeed A user-defined content. + * @param customSeedSize Size of customSeed, in bytes. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. - * Derive a high-entropy secret from any user-defined content, named customSeed. * The generated secret can be used in combination with `*_withSecret()` functions. - * The `_withSecret()` variants are useful to provide a higher level of protection than 64-bit seed, - * as it becomes much more difficult for an external actor to guess how to impact the calculation logic. + * The `_withSecret()` variants are useful to provide a higher level of protection + * than 64-bit seed, as it becomes much more difficult for an external actor to + * guess how to impact the calculation logic. * * The function accepts as input a custom seed of any length and any content, - * and derives from it a high-entropy secret of length @secretSize - * into an already allocated buffer @secretBuffer. - * @secretSize must be >= XXH3_SECRET_SIZE_MIN + * and derives from it a high-entropy secret of length @p secretSize into an + * already allocated buffer @p secretBuffer.
* * The generated secret can then be used with any `*_withSecret()` variant. - * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`, - * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()` + * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(), + * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret() * are part of this list. They all accept a `secret` parameter - * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN) + * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN) * _and_ feature very high entropy (consist of random-looking bytes). - * These conditions can be a high bar to meet, so - * XXH3_generateSecret() can be employed to ensure proper quality. + * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can + * be employed to ensure proper quality. + * + * @p customSeed can be anything. It can have any size, even small ones, + * and its content can be anything, even "poor entropy" sources such as a bunch + * of zeroes. The resulting `secret` will nonetheless provide all required qualities. * - * customSeed can be anything. It can have any size, even small ones, - * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes. - * The resulting `secret` will nonetheless provide all required qualities. + * @pre + * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN + * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior. * - * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior. + * Example code: + * @code{.c} + * #include <stdio.h> + * #include <string.h> + * #include <stdlib.h> + * #define XXH_STATIC_LINKING_ONLY // expose unstable API + * #include "xxhash.h" + * // Hashes argv[2] using the entropy from argv[1]. + * int main(int argc, char* argv[]) + * { + * char secret[XXH3_SECRET_SIZE_MIN]; + * if (argc != 3) { return 1; } + * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1])); + * XXH64_hash_t h = XXH3_64bits_withSecret( + * argv[2], strlen(argv[2]), + * secret, sizeof(secret) + * ); + * printf("%016llx\n", (unsigned long long) h); + * } + * @endcode */ -XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize); +XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize); - -/* - * XXH3_generateSecret_fromSeed(): - * - * Generate the same secret as the _withSeed() variants. +/*! + * @brief Generate the same secret as the _withSeed() variants. - * The resulting secret has a length of XXH3_SECRET_DEFAULT_SIZE (necessarily). - * @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes. + * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes + * @param seed The 64-bit seed to alter the hash result predictably. * * The generated secret can be used in combination with `*_withSecret()` and `_withSecretandSeed()` variants. - * This generator is notably useful in combination with `_withSecretandSeed()`, - * as a way to emulate a faster `_withSeed()` variant.
+ * + * Example C++ `std::string` hash class: + * @code{.cpp} + * #include <string> + * #define XXH_STATIC_LINKING_ONLY // expose unstable API + * #include "xxhash.h" + * // Slow, seeds each time + * class HashSlow { + * XXH64_hash_t seed; + * public: + * HashSlow(XXH64_hash_t s) : seed{s} {} + * size_t operator()(const std::string& x) const { + * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)}; + * } + * }; + * // Fast, caches the seeded secret for future uses. + * class HashFast { + * unsigned char secret[XXH3_SECRET_DEFAULT_SIZE]; + * public: + * HashFast(XXH64_hash_t s) { + * XXH3_generateSecret_fromSeed(secret, s); + * } + * size_t operator()(const std::string& x) const { + * return size_t{ + * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret)) + * }; + * } + * }; + * @endcode */ -XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed); +XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed); -/* - * *_withSecretandSeed() : +/*! + * @brief Calculates 64/128-bit seeded variant of XXH3 hash of @p data. + * + * @param data The block of data to be hashed, at least @p len bytes in size. + * @param len The length of @p data, in bytes. + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * @param seed The 64-bit seed to alter the hash result predictably. + * * These variants generate hash values using either - * @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes) - * or @secret for "large" keys (>= XXH3_MIDSIZE_MAX). + * @p seed for "short" keys (< @ref XXH3_MIDSIZE_MAX = 240 bytes) + * or @p secret for "large" keys (>= @ref XXH3_MIDSIZE_MAX). * * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`. * `_withSeed()` has to generate the secret on the fly for "large" keys. @@ -1175,7 +1911,7 @@ XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_ * which requires more instructions than _withSeed() variants. * Therefore, _withSecretandSeed variant combines the best of both worlds. * - * When @secret has been generated by XXH3_generateSecret_fromSeed(), + * When @p secret has been generated by XXH3_generateSecret_fromSeed(), * this variant produces *exactly* the same results as `_withSeed()` variant, * hence offering only a pure speed benefit on "large" input, * by skipping the need to regenerate the secret for every large input. @@ -1184,34 +1920,77 @@ XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_ * for example with XXH3_64bits(), which then becomes the seed, * and then employ both the seed and the secret in _withSecretandSeed(). * On top of speed, an added benefit is that each bit in the secret - * has a 50% chance to swap each bit in the output, - * via its impact to the seed. + * has a 50% chance to swap each bit in the output, via its impact to the seed. + * * This is not guaranteed when using the secret directly in "small data" scenarios, * because only portions of the secret are employed for small data. */ -XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSecretandSeed(const void* data, size_t len, - const void* secret, size_t secretSize, +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len, + XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed); - -XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSecretandSeed(const void* data, size_t len, - const void* secret, size_t secretSize, +/*!
+ * @brief Calculates 128-bit seeded variant of XXH3 hash of @p input. + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * @param seed64 The 64-bit seed to alter the hash result predictably. + * + * @return The calculated 128-bit variant of XXH3 value. + * + * @see XXH3_64bits_withSecretandSeed() + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t +XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed64); +#ifndef XXH_NO_STREAM +/*! + * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. + * + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * @param seed64 The 64-bit seed to alter the hash result predictably. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @see XXH3_64bits_withSecretandSeed() + */ XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, - const void* secret, size_t secretSize, +XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, + XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64); - +/*! + * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. + * + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * @param seed64 The 64-bit seed to alter the hash result predictably. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @see XXH3_64bits_withSecretandSeed() + */ XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, - const void* secret, size_t secretSize, +XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, + XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64); +#endif /* !XXH_NO_STREAM */ +#if defined (__cplusplus) +} /* extern "C" */ +#endif -#endif /* XXH_NO_XXH3 */ +#endif /* !XXH_NO_XXH3 */ #endif /* XXH_NO_LONG_LONG */ + #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) # define XXH_IMPLEMENTATION #endif @@ -1264,7 +2043,7 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, /*! * @brief Define this to disable 64-bit code. * - * Useful if only using the @ref xxh32_family and you have a strict C90 compiler. + * Useful if only using the @ref XXH32_family and you have a strict C90 compiler. */ # define XXH_NO_LONG_LONG # undef XXH_NO_LONG_LONG /* don't actually */ @@ -1287,7 +2066,7 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, * Use `memcpy()`. Safe and portable. Note that most modern compilers will * eliminate the function call and treat it as an unaligned access. * - * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))` + * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))` * @par * Depends on compiler extensions and is therefore not portable. * This method is safe _if_ your compiler supports it, @@ -1307,19 +2086,47 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, * inline small `memcpy()` calls, and it might also be faster on big-endian * systems which lack a native byteswap instruction.
However, some compilers * will emit literal byteshifts even if the target supports unaligned access. - * . + * * * @warning * Methods 1 and 2 rely on implementation-defined behavior. Use these with * care, as what works on one compiler/platform/optimization level may cause * another to read garbage data or even crash. * - * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. + * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. * * Prefer these methods in priority order (0 > 3 > 1 > 2) */ # define XXH_FORCE_MEMORY_ACCESS 0 +/*! + * @def XXH_SIZE_OPT + * @brief Controls how much xxHash optimizes for size. + * + * xxHash, when compiled, tends to result in a rather large binary size. This + * is mostly due to heavy use of forced inlining and constant folding of the + * @ref XXH3_family to increase performance. + * + * However, some developers prefer size over speed. This option can + * significantly reduce the size of the generated code. When using the `-Os` + * or `-Oz` options on GCC or Clang, this is defined to 1 by default, + * otherwise it is defined to 0. + * + * Most of these size optimizations can be controlled manually. + * + * This is a number from 0-2. + * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed + * comes first. + * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more + * conservative and disables hacks that increase code size. It implies the + * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0, + * and @ref XXH3_NEON_LANES == 8 if they are not already defined. + * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible. + * Performance may cry. For example, the single shot functions just use the + * streaming API. + */ +# define XXH_SIZE_OPT 0 + /*! * @def XXH_FORCE_ALIGN_CHECK * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() @@ -1341,9 +2148,11 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, * * In these cases, the alignment check can be removed by setting this macro to 0. * Then the code will always use unaligned memory access. - * Align check is automatically disabled on x86, x64 & arm64, + * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips * which are platforms known to offer good unaligned memory accesses performance. * + * It is also disabled by default when @ref XXH_SIZE_OPT >= 1. + * * This option does not affect XXH3 (only XXH32 and XXH64). */ # define XXH_FORCE_ALIGN_CHECK 0 @@ -1365,11 +2174,28 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the * compiler full control on whether to inline or not. * - * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using - * -fno-inline with GCC or Clang, this will automatically be defined. + * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if + * @ref XXH_SIZE_OPT >= 1, this will automatically be defined. */ # define XXH_NO_INLINE_HINTS 0 +/*! + * @def XXH3_INLINE_SECRET + * @brief Determines whether to inline the XXH3 withSecret code. + * + * When the secret size is known, the compiler can improve the performance + * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret(). + * + * However, if the secret size is not known, it doesn't have any benefit. This + * happens when xxHash is compiled into a global symbol.
Therefore, if + * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0. + * + * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers + * that are *sometimes* force inline on -Og, and it is impossible to automatically + * detect this optimization level. + */ +# define XXH3_INLINE_SECRET 0 + /*! * @def XXH32_ENDJMP * @brief Whether to use a jump for `XXH32_finalize`. @@ -1391,34 +2217,45 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, */ # define XXH_OLD_NAMES # undef XXH_OLD_NAMES /* don't actually use, it is ugly. */ + +/*! + * @def XXH_NO_STREAM + * @brief Disables the streaming API. + * + * When xxHash is not inlined and the streaming functions are not used, disabling + * the streaming functions can improve code size significantly, especially with + * the @ref XXH3_family which tends to make constant folded copies of itself. + */ +# define XXH_NO_STREAM +# undef XXH_NO_STREAM /* don't actually */ #endif /* XXH_DOXYGEN */ /*! * @} */ #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ - /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */ -# if !defined(__clang__) && \ -( \ - (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ - ( \ - defined(__GNUC__) && ( \ - (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \ - ( \ - defined(__mips__) && \ - (__mips <= 5 || __mips_isa_rev < 6) && \ - (!defined(__mips16) || defined(__mips_mips16e2)) \ - ) \ - ) \ - ) \ -) + /* prefer __packed__ structures (method 1) for GCC + * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy + * which for some reason does unaligned loads. */ +# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED)) # define XXH_FORCE_MEMORY_ACCESS 1 # endif #endif +#ifndef XXH_SIZE_OPT + /* default to 1 for -Os or -Oz */ +# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__) +# define XXH_SIZE_OPT 1 +# else +# define XXH_SIZE_OPT 0 +# endif +#endif + #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \ - || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */ + /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */ +# if XXH_SIZE_OPT >= 1 || \ + defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \ + || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */ # define XXH_FORCE_ALIGN_CHECK 0 # else # define XXH_FORCE_ALIGN_CHECK 1 @@ -1426,14 +2263,22 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, #endif #ifndef XXH_NO_INLINE_HINTS -# if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \ - || defined(__NO_INLINE__) /* -O0, -fno-inline */ +# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */ # define XXH_NO_INLINE_HINTS 1 # else # define XXH_NO_INLINE_HINTS 0 # endif #endif +#ifndef XXH3_INLINE_SECRET +# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \ + || !defined(XXH_INLINE_ALL) +# define XXH3_INLINE_SECRET 0 +# else +# define XXH3_INLINE_SECRET 1 +# endif +#endif + #ifndef XXH32_ENDJMP /* generally preferable for performance */ # define XXH32_ENDJMP 0 @@ -1444,18 +2289,80 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, * @{ */ - /* ************************************* * Includes & Memory related functions 
***************************************/ -/* Modify the local functions below should you wish to use some other memory routines */ -/* for ZSTD_malloc(), ZSTD_free() */ -#define ZSTD_DEPS_NEED_MALLOC -#include "zstd_deps.h" /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */ -static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); } -static void XXH_free (void* p) { ZSTD_free(p); } -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); } +#include <string.h> /* memcmp, memcpy */ +#include <limits.h> /* ULLONG_MAX */ + +#if defined(XXH_NO_STREAM) +/* nothing */ +#elif defined(XXH_NO_STDLIB) + +/* When requesting to disable any mention of stdlib, + * the library loses the ability to invoke malloc() / free(). + * In practice, it means that functions like `XXH*_createState()` + * will always fail, and return NULL. + * This flag is useful in situations where + * xxhash.h is integrated into some kernel, embedded or limited environment + * without access to dynamic allocation. + */ + +#if defined (__cplusplus) +extern "C" { +#endif + +static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; } +static void XXH_free(void* p) { (void)p; } + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +#else + +/* + * Modify the local functions below should you wish to use + * different memory routines for malloc() and free() + */ +#include <stdlib.h> + +#if defined (__cplusplus) +extern "C" { +#endif +/*! + * @internal + * @brief Modify this function to use a different routine than malloc(). + */ +static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); } + +/*! + * @internal + * @brief Modify this function to use a different routine than free(). + */ +static void XXH_free(void* p) { free(p); } + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +#endif /* XXH_NO_STDLIB */ + +#if defined (__cplusplus) +extern "C" { +#endif +/*! + * @internal + * @brief Modify this function to use a different routine than memcpy().
+ */ +static void* XXH_memcpy(void* dest, const void* src, size_t size) +{ + return memcpy(dest,src,size); +} +#if defined (__cplusplus) +} /* extern "C" */ +#endif /* ************************************* * Compiler Specific Options @@ -1487,6 +2394,11 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_ # define XXH_NO_INLINE static #endif +#if XXH3_INLINE_SECRET +# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE +#else +# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE +#endif /* ************************************* @@ -1512,14 +2424,17 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_ # include <assert.h> /* note: can still be disabled with NDEBUG */ # define XXH_ASSERT(c) assert(c) #else -# define XXH_ASSERT(c) ((void)0) +# if defined(__INTEL_COMPILER) +# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c)) +# else +# define XXH_ASSERT(c) XXH_ASSUME(c) +# endif #endif /* note: use after variable declarations */ #ifndef XXH_STATIC_ASSERT # if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ -# include <assert.h> -# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0) # elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */ # define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) # else @@ -1534,7 +2449,7 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_ * @brief Used to prevent unwanted optimizations for @p var. * * It uses an empty GCC inline assembly statement with a register constraint * which forces @p var into a general purpose register (e.g. eax, ebx, ecx * on x86) and marks it as modified. * * This is used in a few places to avoid unwanted autovectorization (e.g. * XXH32_round()). All vectorization we want is explicit via intrinsics, * and _usually_ isn't wanted elsewhere, but it can be used by GCC in * XXH3_initCustomSecret_scalar(). */ #if defined(__GNUC__) || defined(__clang__) -# define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var)) +# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var)) #else # define XXH_COMPILER_GUARD(var) ((void)0) #endif +/* Specifically for NEON vectors which use the "w" constraint, on + * Clang. */ +#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__) +# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var)) +#else +# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0) +#endif + /* ************************************* * Basic Types ***************************************/ #if !defined (__VMS) \ && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> +# ifdef _AIX +# include <inttypes.h> +# else +# include <stdint.h> +# endif typedef uint8_t xxh_u8; #else typedef unsigned char xxh_u8; #endif typedef XXH32_hash_t xxh_u32; #ifdef XXH_OLD_NAMES +# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly" # define BYTE xxh_u8 # define U8 xxh_u8 # define U32 xxh_u32 #endif +#if defined (__cplusplus) +extern "C" { +#endif + /* *** Memory access *** */ /*!
/* ************************************* * Basic Types ***************************************/ #if !defined (__VMS) \ && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # ifdef _AIX # include <inttypes.h> # else # include <stdint.h> # endif typedef uint8_t xxh_u8; #else typedef unsigned char xxh_u8; @@ -1564,11 +2491,16 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_ typedef XXH32_hash_t xxh_u32; #ifdef XXH_OLD_NAMES +# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly" # define BYTE xxh_u8 # define U8 xxh_u8 # define U32 xxh_u32 #endif +#if defined (__cplusplus) +extern "C" { +#endif + /* *** Memory access *** */ /*! @@ -1637,25 +2569,26 @@ static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) /* - * __pack instructions are safer but compiler specific, hence potentially - * problematic for some compilers. - * - * Currently only defined for GCC and ICC. + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. */ #ifdef XXH_OLD_NAMES typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; #endif static xxh_u32 XXH_read32(const void* ptr) { - typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign; - return ((const xxh_unalign*)ptr)->u32; + typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; + return *((const xxh_unalign32*)ptr); } #else /* * Portable and safe solution. Generally efficient. - * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u32 XXH_read32(const void* memPtr) { @@ -1731,6 +2664,51 @@ static int XXH_isLittleEndian(void) # define XXH_HAS_BUILTIN(x) 0 #endif + + +/* + * C23 and future versions have standard "unreachable()". + * Once it has been implemented reliably we can add it as an + * additional case: + * + * ``` + * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) + * # include <stddef.h> + * # ifdef unreachable + * # define XXH_UNREACHABLE() unreachable() + * # endif + * #endif + * ``` + * + * Note C++23 also has std::unreachable() which can be detected + * as follows: + * ``` + * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L) + * # include <utility> + * # define XXH_UNREACHABLE() std::unreachable() + * #endif + * ``` + * NB: `__cpp_lib_unreachable` is defined in the `<version>` header. + * We don't use that as including `<utility>` in `extern "C"` blocks + * doesn't work on GCC12 + */ + +#if XXH_HAS_BUILTIN(__builtin_unreachable) +# define XXH_UNREACHABLE() __builtin_unreachable() + +#elif defined(_MSC_VER) +# define XXH_UNREACHABLE() __assume(0) + +#else +# define XXH_UNREACHABLE() +#endif + +#if XXH_HAS_BUILTIN(__builtin_assume) +# define XXH_ASSUME(c) __builtin_assume(c) +#else +# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); } +#endif + /*! * @internal * @def XXH_rotl32(x,r) @@ -1853,8 +2831,10 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } *********************************************************************/ /*! * @} - * @defgroup xxh32_impl XXH32 implementation + * @defgroup XXH32_impl XXH32 implementation * @ingroup impl + * + * Details on the XXH32 implementation. * @{ */ /* #define instead of static const, to be used as initializers */ @@ -1888,7 +2868,7 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) acc += input * XXH_PRIME32_2; acc = XXH_rotl32(acc, 13); acc *= XXH_PRIME32_1; -#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) +#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) /* * UGLY HACK: * A compiler fence is the only thing that prevents GCC and Clang from @@ -1918,9 +2898,12 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) * can load data, while v3 can multiply. SSE forces them to operate * together.
* - * This is also enabled on AArch64, as Clang autovectorizes it incorrectly - * and it is pointless writing a NEON implementation that is basically the - * same speed as scalar for XXH32. + * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing + * the loop. NEON is only faster on the A53, and with the newer cores, it is less + * than half the speed. + * + * Additionally, this is used on WASM SIMD128 because it JITs to the same + * SIMD instructions and has the same issue. */ XXH_COMPILER_GUARD(acc); #endif @@ -1934,17 +2917,17 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) * The final mix ensures that all input bits have a chance to impact any bit in * the output digest, resulting in an unbiased distribution. * - * @param h32 The hash to avalanche. + * @param hash The hash to avalanche. * @return The avalanched hash. */ -static xxh_u32 XXH32_avalanche(xxh_u32 h32) +static xxh_u32 XXH32_avalanche(xxh_u32 hash) { - h32 ^= h32 >> 15; - h32 *= XXH_PRIME32_2; - h32 ^= h32 >> 13; - h32 *= XXH_PRIME32_3; - h32 ^= h32 >> 16; - return(h32); + hash ^= hash >> 15; + hash *= XXH_PRIME32_2; + hash ^= hash >> 13; + hash *= XXH_PRIME32_3; + hash ^= hash >> 16; + return hash; } #define XXH_get32bits(p) XXH_readLE32_align(p, align) @@ -1957,24 +2940,25 @@ static xxh_u32 XXH32_avalanche(xxh_u32 h32) * This final stage will digest them to ensure that all input bytes are present * in the final mix. * - * @param h32 The hash to finalize. + * @param hash The hash to finalize. * @param ptr The pointer to the remaining input. * @param len The remaining length, modulo 16. * @param align Whether @p ptr is aligned. * @return The finalized hash. + * @see XXH64_finalize(). */ -static xxh_u32 -XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) +static XXH_PUREF xxh_u32 +XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) { -#define XXH_PROCESS1 do { \ - h32 += (*ptr++) * XXH_PRIME32_5; \ - h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \ +#define XXH_PROCESS1 do { \ + hash += (*ptr++) * XXH_PRIME32_5; \ + hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ } while (0) -#define XXH_PROCESS4 do { \ - h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \ - ptr += 4; \ - h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \ +#define XXH_PROCESS4 do { \ + hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ } while (0) if (ptr==NULL) XXH_ASSERT(len == 0); @@ -1990,49 +2974,49 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) XXH_PROCESS1; --len; } - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); } else { switch(len&15) /* or switch(bEnd - p) */ { case 12: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 8: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 4: XXH_PROCESS4; - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); case 13: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 9: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 5: XXH_PROCESS4; XXH_PROCESS1; - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); case 14: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 10: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 6: XXH_PROCESS4; XXH_PROCESS1; XXH_PROCESS1; - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); case 15: XXH_PROCESS4; - XXH_FALLTHROUGH; + 
XXH_FALLTHROUGH; /* fallthrough */ case 11: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 7: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 3: XXH_PROCESS1; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 2: XXH_PROCESS1; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 1: XXH_PROCESS1; - XXH_FALLTHROUGH; - case 0: return XXH32_avalanche(h32); + XXH_FALLTHROUGH; /* fallthrough */ + case 0: return XXH32_avalanche(hash); } XXH_ASSERT(0); - return h32; /* reaching this point is deemed impossible */ + return hash; /* reaching this point is deemed impossible */ } } @@ -2052,7 +3036,7 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) * @param align Whether @p input is aligned. * @return The calculated hash. */ -XXH_FORCE_INLINE xxh_u32 +XXH_FORCE_INLINE XXH_PUREF xxh_u32 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) { xxh_u32 h32; @@ -2085,10 +3069,10 @@ XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment return XXH32_finalize(h32, input, len&15, align); } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) { -#if 0 +#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH32_state_t state; XXH32_reset(&state, seed); @@ -2107,27 +3091,26 @@ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t s /******* Hash streaming *******/ -/*! - * @ingroup xxh32_family - */ +#ifndef XXH_NO_STREAM +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) { return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) { XXH_memcpy(dstState, srcState, sizeof(*dstState)); } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) { XXH_ASSERT(statePtr != NULL); @@ -2140,7 +3123,7 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t s } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t* state, const void* input, size_t len) { @@ -2195,7 +3178,7 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len) } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) { xxh_u32 h32; @@ -2213,31 +3196,18 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); } - +#endif /* !XXH_NO_STREAM */ /******* Canonical representation *******/ -/*! - * @ingroup xxh32_family - * The default return values from XXH functions are unsigned 32 and 64 bit - * integers. - * - * The canonical representation uses big endian convention, the same convention - * as human-readable numbers (large digits first). - * - * This way, hash values can be written into a file or buffer, remaining - * comparable across different systems. 
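The canonical-representation contract that the trimmed comment above described still holds: hashes are stored big endian so the bytes compare equal across platforms. A short sketch of the round trip using only the public API (the helper name is illustrative):

```c
#include <stdio.h>
#include "xxhash.h"

/* Canonical form is big endian, so the stored bytes are portable. */
static void canonical_roundtrip(XXH32_hash_t h)
{
    XXH32_canonical_t canon;
    XXH32_canonicalFromHash(&canon, h);   /* canon.digest: 4 big-endian bytes */
    /* canon.digest could now be written to a file or the wire verbatim */
    printf("restored: %08x\n", (unsigned)XXH32_hashFromCanonical(&canon));
}
```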
- * - * The following functions allow transformation of hash values to and from their - * canonical format. - */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) { - /* XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); */ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); XXH_memcpy(dst, &hash, sizeof(*dst)); } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) { return XXH_readBE32(src); @@ -2278,25 +3248,26 @@ static xxh_u64 XXH_read64(const void* memPtr) #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) /* - * __pack instructions are safer, but compiler specific, hence potentially - * problematic for some compilers. - * - * Currently only defined for GCC and ICC. + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. */ #ifdef XXH_OLD_NAMES typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; #endif static xxh_u64 XXH_read64(const void* ptr) { - typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64; - return ((const xxh_unalign64*)ptr)->u64; + typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; + return *((const xxh_unalign64*)ptr); } #else /* * Portable and safe solution. Generally efficient. - * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u64 XXH_read64(const void* memPtr) { @@ -2380,8 +3351,10 @@ XXH_readLE64_align(const void* ptr, XXH_alignment align) /******* xxh64 *******/ /*! * @} - * @defgroup xxh64_impl XXH64 implementation + * @defgroup XXH64_impl XXH64 implementation * @ingroup impl + * + * Details on the XXH64 implementation. * @{ */ /* #define rather that static const, to be used as initializers */ @@ -2399,11 +3372,29 @@ XXH_readLE64_align(const void* ptr, XXH_alignment align) # define PRIME64_5 XXH_PRIME64_5 #endif +/*! @copydoc XXH32_round */ static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) { acc += input * XXH_PRIME64_2; acc = XXH_rotl64(acc, 31); acc *= XXH_PRIME64_1; +#if (defined(__AVX512F__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * DISABLE AUTOVECTORIZATION: + * A compiler fence is used to prevent GCC and Clang from + * autovectorizing the XXH64 loop (pragmas and attributes don't work for some + * reason) without globally disabling AVX512. + * + * Autovectorization of XXH64 tends to be detrimental, + * though the exact outcome may change depending on exact cpu and compiler version. + * For information, it has been reported as detrimental for Skylake-X, + * but possibly beneficial for Zen4. + * + * The default is to disable auto-vectorization, + * but you can select to enable it instead using `XXH_ENABLE_AUTOVECTORIZE` build variable. + */ + XXH_COMPILER_GUARD(acc); +#endif return acc; } @@ -2415,43 +3406,59 @@ static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) return acc; } -static xxh_u64 XXH64_avalanche(xxh_u64 h64) +/*! 
@copydoc XXH32_avalanche */ +static xxh_u64 XXH64_avalanche(xxh_u64 hash) { - h64 ^= h64 >> 33; - h64 *= XXH_PRIME64_2; - h64 ^= h64 >> 29; - h64 *= XXH_PRIME64_3; - h64 ^= h64 >> 32; - return h64; + hash ^= hash >> 33; + hash *= XXH_PRIME64_2; + hash ^= hash >> 29; + hash *= XXH_PRIME64_3; + hash ^= hash >> 32; + return hash; } #define XXH_get64bits(p) XXH_readLE64_align(p, align) -static xxh_u64 -XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) +/*! + * @internal + * @brief Processes the last 0-31 bytes of @p ptr. + * + * There may be up to 31 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 32. + * @param align Whether @p ptr is aligned. + * @return The finalized hash + * @see XXH32_finalize(). + */ +static XXH_PUREF xxh_u64 +XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) { if (ptr==NULL) XXH_ASSERT(len == 0); len &= 31; while (len >= 8) { xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); ptr += 8; - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4; + hash ^= k1; + hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4; len -= 8; } if (len >= 4) { - h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; + hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; ptr += 4; - h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; len -= 4; } while (len > 0) { - h64 ^= (*ptr++) * XXH_PRIME64_5; - h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; + hash ^= (*ptr++) * XXH_PRIME64_5; + hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1; --len; } - return XXH64_avalanche(h64); + return XXH64_avalanche(hash); } #ifdef XXH_OLD_NAMES @@ -2464,7 +3471,15 @@ XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) # undef XXH_PROCESS8_64 #endif -XXH_FORCE_INLINE xxh_u64 +/*! + * @internal + * @brief The implementation for @ref XXH64(). + * + * @param input , len , seed Directly passed from @ref XXH64(). + * @param align Whether @p input is aligned. + * @return The calculated hash. + */ +XXH_FORCE_INLINE XXH_PUREF xxh_u64 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) { xxh_u64 h64; @@ -2501,10 +3516,10 @@ XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed) +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) { -#if 0 +#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH64_state_t state; XXH64_reset(&state, seed); @@ -2522,27 +3537,27 @@ XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t s } /******* Hash Streaming *******/ - -/*! @ingroup xxh64_family*/ +#ifndef XXH_NO_STREAM +/*! @ingroup XXH64_family*/ XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) { return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); } -/*! @ingroup xxh64_family */ +/*! @ingroup XXH64_family */ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } -/*! 
@ingroup xxh64_family */ -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState) { XXH_memcpy(dstState, srcState, sizeof(*dstState)); } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed) +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed) { XXH_ASSERT(statePtr != NULL); memset(statePtr, 0, sizeof(*statePtr)); @@ -2553,9 +3568,9 @@ XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t s return XXH_OK; } -/*! @ingroup xxh64_family */ +/*! @ingroup XXH64_family */ XXH_PUBLIC_API XXH_errorcode -XXH64_update (XXH64_state_t* state, const void* input, size_t len) +XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) { if (input==NULL) { XXH_ASSERT(len == 0); @@ -2605,8 +3620,8 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len) } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state) +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) { xxh_u64 h64; @@ -2624,24 +3639,28 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state) return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); } - +#endif /* !XXH_NO_STREAM */ /******* Canonical representation *******/ -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) { - /* XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); */ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); XXH_memcpy(dst, &hash, sizeof(*dst)); } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) { return XXH_readBE64(src); } +#if defined (__cplusplus) +} +#endif + #ifndef XXH_NO_XXH3 /* ********************************************************************* @@ -2650,7 +3669,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src ************************************************************************ */ /*! * @} - * @defgroup xxh3_impl XXH3 implementation + * @defgroup XXH3_impl XXH3 implementation * @ingroup impl * @{ */ @@ -2658,11 +3677,19 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src /* === Compiler specifics === */ #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. 
Tested with GCC 5.5 */ -# define XXH_RESTRICT /* disable */ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ # define XXH_RESTRICT restrict +#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \ + || (defined (__clang__)) \ + || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \ + || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300)) +/* + * There are a LOT more compilers that recognize __restrict but this + * covers the major ones. + */ +# define XXH_RESTRICT __restrict #else -/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */ # define XXH_RESTRICT /* disable */ #endif @@ -2676,10 +3703,26 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src # define XXH_unlikely(x) (x) #endif +#ifndef XXH_HAS_INCLUDE +# ifdef __has_include +/* + * Not defined as XXH_HAS_INCLUDE(x) (function-like) because + * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion) + */ +# define XXH_HAS_INCLUDE __has_include +# else +# define XXH_HAS_INCLUDE(x) 0 +# endif +#endif + #if defined(__GNUC__) || defined(__clang__) +# if defined(__ARM_FEATURE_SVE) +# include <arm_sve.h> +# endif # if defined(__ARM_NEON__) || defined(__ARM_NEON) \ - || defined(__aarch64__) || defined(_M_ARM) \ - || defined(_M_ARM64) || defined(_M_ARM64EC) + || (defined(_M_ARM) && _M_ARM >= 7) \ + || defined(_M_ARM64) || defined(_M_ARM64EC) \ + || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */ # define inline __inline__ /* circumvent a clang bug */ # include <arm_neon.h> # undef inline @@ -2790,7 +3833,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src * Note that these are actually implemented as macros. * * If this is not defined, it is detected automatically. - * @ref XXH_X86DISPATCH overrides this. + * internal macro XXH_X86DISPATCH overrides this. */ enum XXH_VECTOR_TYPE /* fake enum */ { XXH_SCALAR = 0, /*!< Portable scalar version */ XXH_SSE2 = 1, /*!< * SSE2 for Pentium 4, Opteron, all x86_64. * * @note SSE2 is also guaranteed on Windows 10, macOS, and * Android x86. */ XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ - XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */ + XXH_NEON = 4, /*!< + * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 + * via the SIMDeverywhere polyfill provided with the + * Emscripten SDK. + */ XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ + XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ }; /*!
* @ingroup tuning @@ -2825,12 +3873,16 @@ enum XXH_VECTOR_TYPE /* fake enum */ { # define XXH_AVX512 3 # define XXH_NEON 4 # define XXH_VSX 5 +# define XXH_SVE 6 #endif #ifndef XXH_VECTOR /* can be defined on command line */ -# if ( \ +# if defined(__ARM_FEATURE_SVE) +# define XXH_VECTOR XXH_SVE +# elif ( \ defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \ + || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \ ) && ( \ defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ @@ -2840,7 +3892,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ { # define XXH_VECTOR XXH_AVX512 # elif defined(__AVX2__) # define XXH_VECTOR XXH_AVX2 -# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) +# elif defined(__SSE2__) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) # define XXH_VECTOR XXH_SSE2 # elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ || (defined(__s390x__) && defined(__VEC__)) \ @@ -2851,6 +3903,17 @@ enum XXH_VECTOR_TYPE /* fake enum */ { # endif #endif +/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */ +#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE) +# ifdef _MSC_VER +# pragma warning(once : 4606) +# else +# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead." +# endif +# undef XXH_VECTOR +# define XXH_VECTOR XXH_SCALAR +#endif + /* * Controls the alignment of the accumulator, * for compatibility with aligned vector loads, which are usually faster. @@ -2870,16 +3933,26 @@ enum XXH_VECTOR_TYPE /* fake enum */ { # define XXH_ACC_ALIGN 16 # elif XXH_VECTOR == XXH_AVX512 /* avx512 */ # define XXH_ACC_ALIGN 64 +# elif XXH_VECTOR == XXH_SVE /* sve */ +# define XXH_ACC_ALIGN 64 # endif #endif #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \ || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 # define XXH_SEC_ALIGN XXH_ACC_ALIGN +#elif XXH_VECTOR == XXH_SVE +# define XXH_SEC_ALIGN XXH_ACC_ALIGN #else # define XXH_SEC_ALIGN 8 #endif +#if defined(__GNUC__) || defined(__clang__) +# define XXH_ALIASING __attribute__((may_alias)) +#else +# define XXH_ALIASING /* nothing */ +#endif + /* * UGLY HACK: * GCC usually generates the best code with -O3 for xxHash. @@ -2903,153 +3976,130 @@ enum XXH_VECTOR_TYPE /* fake enum */ { */ #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ - && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */ + && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ # pragma GCC push_options # pragma GCC optimize("-O2") #endif +#if defined (__cplusplus) +extern "C" { +#endif #if XXH_VECTOR == XXH_NEON + /* - * NEON's setup for vmlal_u32 is a little more complicated than it is on - * SSE2, AVX2, and VSX. * - * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast. - * - * To do the same operation, the 128-bit 'Q' register needs to be split into - * two 64-bit 'D' registers, performing this operation:: - * - * [ a | b ] - * | '---------. .--------' | - * | x | - * | .---------' '--------. 
| - * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ] - * - * Due to significant changes in aarch64, the fastest method for aarch64 is - * completely different than the fastest method for ARMv7-A. - * - * ARMv7-A treats D registers as unions overlaying Q registers, so modifying - * D11 will modify the high half of Q5. This is similar to how modifying AH - * will only affect bits 8-15 of AX on x86. - * - * VZIP takes two registers, and puts even lanes in one register and odd lanes - * in the other. - * - * On ARMv7-A, this strangely modifies both parameters in place instead of - * taking the usual 3-operand form. - * - * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the - * lower and upper halves of the Q register to end up with the high and low - * halves where we want - all in one instruction. - * - * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] } - * - * Unfortunately we need inline assembly for this: Instructions modifying two - * registers at once is not possible in GCC or Clang's IR, and they have to - * create a copy. - * - * aarch64 requires a different approach. - * - * In order to make it easier to write a decent compiler for aarch64, many - * quirks were removed, such as conditional execution. - * - * NEON was also affected by this. - * - * aarch64 cannot access the high bits of a Q-form register, and writes to a - * D-form register zero the high bits, similar to how writes to W-form scalar - * registers (or DWORD registers on x86_64) work. - * - * The formerly free vget_high intrinsics now require a vext (with a few - * exceptions) - * - * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent - * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one - * operand. - * - * The equivalent of the VZIP.32 on the lower and upper halves would be this - * mess: - * - * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] } - * zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] } - * zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] } + * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, + * so the only option is to mark it as aliasing. + */ +typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; + +/*! + * @internal + * @brief `vld1q_u64` but faster and alignment-safe. * - * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN): + * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only + * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). * - * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32); - * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF); + * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it + * prohibits load-store optimizations. Therefore, a direct dereference is used. * - * This is available on ARMv7-A, but is less efficient than a single VZIP.32. + * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe + * unaligned load. */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ +{ + return *(xxh_aliasing_uint64x2_t const *)ptr; +} +#else +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) +{ + return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); +} +#endif /*! 
- * Function-like macro: - * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi) - * { - * outLo = (uint32x2_t)(in & 0xFFFFFFFF); - * outHi = (uint32x2_t)(in >> 32); - * in = UNDEFINED; - * } - */ -# if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \ - && (defined(__GNUC__) || defined(__clang__)) \ - && (defined(__arm__) || defined(__thumb__) || defined(_M_ARM)) -# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ - do { \ - /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \ - /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \ - /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \ - __asm__("vzip.32 %e0, %f0" : "+w" (in)); \ - (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \ - (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \ - } while (0) -# else -# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ - do { \ - (outLo) = vmovn_u64 (in); \ - (outHi) = vshrn_n_u64 ((in), 32); \ - } while (0) -# endif + * @internal + * @brief `vmlal_u32` on low and high halves of a vector. + * + * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with + * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` + * with `vmlal_u32`. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* Inline assembly is the only way */ + __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); + return acc; +} +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* This intrinsic works as expected */ + return vmlal_high_u32(acc, lhs, rhs); +} +#else +/* Portable intrinsic versions */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); +} +/*! @copydoc XXH_vmlal_low_u32 + * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); +} +#endif /*! * @ingroup tuning * @brief Controls the NEON to scalar ratio for XXH3 * - * On AArch64 when not optimizing for size, XXH3 will run 6 lanes using NEON and - * 2 lanes on scalar by default. + * This can be set to 2, 4, 6, or 8. * - * This can be set to 2, 4, 6, or 8. ARMv7 will default to all 8 NEON lanes, as the - * emulated 64-bit arithmetic is too slow. + * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. * - * Modern ARM CPUs are _very_ sensitive to how their pipelines are used. + * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those + * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU + * bandwidth. * - * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but it can't - * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions, - * you are only using 2/3 of the CPU bandwidth. - * - * This is even more noticeable on the more advanced cores like the A76 which + * This is even more noticeable on the more advanced cores like the Cortex-A76 which * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once. 
* - * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the - * remaining lanes will use scalar instructions. This improves the bandwidth - * and also gives the integer pipelines something to do besides twiddling loop - * counters and pointers. + * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes + * and 2 scalar lanes, which is chosen by default. + * + * This does not apply to Apple processors or 32-bit processors, which run better with + * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes. * * This change benefits CPUs with large micro-op buffers without negatively affecting - * other CPUs: + * most other CPUs: * * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. | * |:----------------------|:--------------------|----------:|-----------:|------:| * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% | * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% | * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% | + * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% | * * It also seems to fix some bad codegen on GCC, making it almost as fast as clang. * + * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes, meaning + * it effectively becomes a worse 4. + * * @see XXH3_accumulate_512_neon() */ # ifndef XXH3_NEON_LANES # if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \ - && !defined(__OPTIMIZE_SIZE__) + && !defined(__APPLE__) && XXH_SIZE_OPT <= 0 # define XXH3_NEON_LANES 6 # else # define XXH3_NEON_LANES XXH_ACC_NB @@ -3057,6 +4107,10 @@ enum XXH_VECTOR_TYPE /* fake enum */ { # endif #endif /* XXH_VECTOR == XXH_NEON */ +#if defined (__cplusplus) +} /* extern "C" */ +#endif +
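Since `XXH3_NEON_LANES` is an ordinary macro with a default, the ratio can be tuned per build without editing the source. A hypothetical override for a core where the 6:2 split buys nothing (the A53 row in the table above):

```c
/* Hypothetical build-time override (e.g. cc -DXXH3_NEON_LANES=8 ...):
 * legal values are 2, 4, 6, or 8. AArch64 defaults to the 6:2 hybrid;
 * Apple cores, 32-bit ARM, and size-optimized builds default to 8. */
#ifndef XXH3_NEON_LANES
# define XXH3_NEON_LANES 8   /* all lanes on NEON, as on an in-order A53 */
#endif
```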
/* * VSX and Z Vector helpers. * @@ -3066,27 +4120,42 @@ enum XXH_VECTOR_TYPE /* fake enum */ { * inconsistent intrinsics, spotty coverage, and multiple endiannesses. */ #if XXH_VECTOR == XXH_VSX +/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`, + * and `pixel`. This is a problem for obvious reasons. + * + * These keywords are unnecessary; the spec literally says they are + * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd + * after including the header. + * + * We use pragma push_macro/pop_macro to keep the namespace clean. */ +# pragma push_macro("bool") +# pragma push_macro("vector") +# pragma push_macro("pixel") +/* silence potential macro redefined warnings */ +# undef bool +# undef vector +# undef pixel + # if defined(__s390x__) # include <s390intrin.h> # else -/* gcc's altivec.h can have the unwanted consequence to unconditionally - * #define bool, vector, and pixel keywords, - * with bad consequences for programs already using these keywords for other purposes. - * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined. - * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler, - * but it seems that, in some cases, it isn't. - * Force the build macro to be defined, so that keywords are not altered. - */ -# if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__) -# define __APPLE_ALTIVEC__ -# endif # include <altivec.h> # endif +/* Restore the original macro values, if applicable. */ +# pragma pop_macro("pixel") +# pragma pop_macro("vector") +# pragma pop_macro("bool") + typedef __vector unsigned long long xxh_u64x2; typedef __vector unsigned char xxh_u8x16; typedef __vector unsigned xxh_u32x4; +/* + * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue. + */ +typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING; + # ifndef XXH_VSX_BE # if defined(__BIG_ENDIAN__) \ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) @@ -3103,6 +4172,9 @@ typedef __vector unsigned xxh_u32x4; # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) # define XXH_vec_revb vec_revb # else +#if defined (__cplusplus) +extern "C" { +#endif /*! * A polyfill for POWER9's vec_revb(). */ @@ -3112,9 +4184,15 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; return vec_perm(val, val, vByteSwap); } +#if defined (__cplusplus) +} /* extern "C" */ +#endif # endif # endif /* XXH_VSX_BE */ +#if defined (__cplusplus) +extern "C" { +#endif /*! * Performs an unaligned vector load and byte swaps it on big endian. */ @@ -3138,8 +4216,9 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) /* s390x is always big endian, no issue on this platform */ # define XXH_vec_mulo vec_mulo # define XXH_vec_mule vec_mule -# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) +# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__) /* Clang has a better way to control this, we can just use the builtin which doesn't swap. */ + /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */ # define XXH_vec_mulo __builtin_altivec_vmulouw # define XXH_vec_mule __builtin_altivec_vmuleuw # else @@ -3158,15 +4237,35 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) return result; } # endif /* XXH_vec_mulo, XXH_vec_mule */ + +#if defined (__cplusplus) +} /* extern "C" */ +#endif + #endif /* XXH_VECTOR == XXH_VSX */ +#if XXH_VECTOR == XXH_SVE +#define ACCRND(acc, offset) \ +do { \ + svuint64_t input_vec = svld1_u64(mask, xinput + offset); \ + svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \ + svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \ + svuint64_t swapped = svtbl_u64(input_vec, kSwap); \ + svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \ + svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \ + svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \ + acc = svadd_u64_x(mask, acc, mul); \ +} while (0) +#endif /* XXH_VECTOR == XXH_SVE */ /* prefetch * can be disabled, by declaring XXH_NO_PREFETCH build macro */ #if defined(XXH_NO_PREFETCH) # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ #else -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ +# if XXH_SIZE_OPT >= 1 +# define XXH_PREFETCH(ptr) (void)(ptr) +# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) @@ -3176,7 +4275,9 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) # endif #endif /* XXH_NO_PREFETCH */ - +#if defined (__cplusplus) +extern "C" { +#endif /* ========================================== * 
XXH3 default settings * ========================================== */ @@ -3203,6 +4304,8 @@ XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, }; +static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ +static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ #ifdef XXH_OLD_NAMES # define kSecret XXH3_kSecret @@ -3394,7 +4497,7 @@ XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) } /*! Seems to produce slightly better code on GCC for some reason. */ -XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) +XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) { XXH_ASSERT(0 <= shift && shift < 64); return v64 ^ (v64 >> shift); @@ -3407,7 +4510,7 @@ XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) { h64 = XXH_xorshift64(h64, 37); - h64 *= 0x165667919E3779F9ULL; + h64 *= PRIME_MX1; h64 = XXH_xorshift64(h64, 32); return h64; } @@ -3421,9 +4524,9 @@ static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) { /* this mix is inspired by Pelle Evensen's rrmxmx */ h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); - h64 *= 0x9FB21C651E98DF25ULL; + h64 *= PRIME_MX2; h64 ^= (h64 >> 35) + len ; - h64 *= 0x9FB21C651E98DF25ULL; + h64 *= PRIME_MX2; return XXH_xorshift64(h64, 28); } @@ -3461,7 +4564,7 @@ static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) * * This adds an extra layer of strength for custom secrets. */ -XXH_FORCE_INLINE XXH64_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) { XXH_ASSERT(input != NULL); @@ -3483,7 +4586,7 @@ XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_h } } -XXH_FORCE_INLINE XXH64_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) { XXH_ASSERT(input != NULL); @@ -3499,7 +4602,7 @@ XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_h } } -XXH_FORCE_INLINE XXH64_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) { XXH_ASSERT(input != NULL); @@ -3516,7 +4619,7 @@ XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_ } } -XXH_FORCE_INLINE XXH64_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) { XXH_ASSERT(len <= 16); @@ -3586,7 +4689,7 @@ XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, } /* For mid range keys, XXH3 uses a Mum-hash variant. */ -XXH_FORCE_INLINE XXH64_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, const xxh_u8* XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) @@ -3595,6 +4698,14 @@ XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, XXH_ASSERT(16 < len && len <= 128); { xxh_u64 acc = len * XXH_PRIME64_1; +#if XXH_SIZE_OPT >= 1 + /* Smaller and cleaner, but slightly slower. 
*/ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + acc += XXH3_mix16B(input+16 * i, secret+32*i, seed); + acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed); + } while (i-- != 0); +#else if (len > 32) { if (len > 64) { if (len > 96) { @@ -3609,14 +4720,17 @@ XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, } acc += XXH3_mix16B(input+0, secret+0, seed); acc += XXH3_mix16B(input+len-16, secret+16, seed); - +#endif return XXH3_avalanche(acc); } } +/*! + * @brief Maximum size of "short" key in bytes. + */ #define XXH3_MIDSIZE_MAX 240 -XXH_NO_INLINE XXH64_hash_t +XXH_NO_INLINE XXH_PUREF XXH64_hash_t XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, const xxh_u8* XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) @@ -3628,13 +4742,17 @@ XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, #define XXH3_MIDSIZE_LASTOFFSET 17 { xxh_u64 acc = len * XXH_PRIME64_1; - int const nbRounds = (int)len / 16; - int i; + xxh_u64 acc_end; + unsigned int const nbRounds = (unsigned int)len / 16; + unsigned int i; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); for (i=0; i<8; i++) { acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); } - acc = XXH3_avalanche(acc); + /* last bytes */ + acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); XXH_ASSERT(nbRounds >= 8); + acc = XXH3_avalanche(acc); #if defined(__clang__) /* Clang */ \ && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ @@ -3661,11 +4779,13 @@ XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, #pragma clang loop vectorize(disable) #endif for (i=8 ; i < nbRounds; i++) { - acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); + /* + * Prevents Clang from unrolling the acc loop and interleaving with this one. + */ + XXH_COMPILER_GUARD(acc); + acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); } - /* last bytes */ - acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); - return XXH3_avalanche(acc); + return XXH3_avalanche(acc + acc_end); } } @@ -3681,6 +4801,47 @@ XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, # define ACC_NB XXH_ACC_NB #endif +#ifndef XXH_PREFETCH_DIST +# ifdef __clang__ +# define XXH_PREFETCH_DIST 320 +# else +# if (XXH_VECTOR == XXH_AVX512) +# define XXH_PREFETCH_DIST 512 +# else +# define XXH_PREFETCH_DIST 384 +# endif +# endif /* __clang__ */ +#endif /* XXH_PREFETCH_DIST */ + +/* + * These macros are to generate an XXH3_accumulate() function. + * The two arguments select the name suffix and target attribute. + * + * The name of this symbol is XXH3_accumulate_<name>() and it calls + * XXH3_accumulate_512_<name>(). + * + * It may be useful to hand implement this function if the compiler fails to + * optimize the inline function.
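For reference, instantiating the template with the scalar suffix produces roughly the following driver; the real instantiations additionally carry a target attribute such as `XXH_TARGET_AVX512` in front of the macro invocation.

```c
/* Approximate expansion of XXH3_ACCUMULATE_TEMPLATE(scalar): a striped
 * driver that prefetches ahead and feeds each stripe to the 512-bit kernel. */
void XXH3_accumulate_scalar(xxh_u64* XXH_RESTRICT acc,
                            const xxh_u8* XXH_RESTRICT input,
                            const xxh_u8* XXH_RESTRICT secret,
                            size_t nbStripes)
{
    size_t n;
    for (n = 0; n < nbStripes; n++) {
        const xxh_u8* const in = input + n * XXH_STRIPE_LEN;
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);
        XXH3_accumulate_512_scalar(acc, in, secret + n * XXH_SECRET_CONSUME_RATE);
    }
}
```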
+ */ +#define XXH3_ACCUMULATE_TEMPLATE(name) \ +void \ +XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \ + const xxh_u8* XXH_RESTRICT input, \ + const xxh_u8* XXH_RESTRICT secret, \ + size_t nbStripes) \ +{ \ + size_t n; \ + for (n = 0; n < nbStripes; n++ ) { \ + const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \ + XXH_PREFETCH(in + XXH_PREFETCH_DIST); \ + XXH3_accumulate_512_##name( \ + acc, \ + in, \ + secret + n*XXH_SECRET_CONSUME_RATE); \ + } \ +} + + XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) { if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); @@ -3749,7 +4910,7 @@ XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, /* data_key = data_vec ^ key_vec; */ __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); /* data_key_lo = data_key >> 32; */ - __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); + __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); /* xacc[0] += swap(data_vec); */ @@ -3759,6 +4920,7 @@ XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, *xacc = _mm512_add_epi64(product, sum); } } +XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) /* * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. @@ -3792,13 +4954,12 @@ XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) /* xacc[0] ^= (xacc[0] >> 47) */ __m512i const acc_vec = *xacc; __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); - __m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted); /* xacc[0] ^= secret; */ __m512i const key_vec = _mm512_loadu_si512 (secret); - __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); /* xacc[0] *= XXH_PRIME32_1; */ - __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); + __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); @@ -3813,7 +4974,8 @@ XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) XXH_ASSERT(((size_t)customSecret & 63) == 0); (void)(&XXH_writeLE64); { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); - __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64)); + __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); + __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); __m512i* const dest = ( __m512i*) customSecret; @@ -3821,14 +4983,7 @@ XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ XXH_ASSERT(((size_t)dest & 63) == 0); for (i=0; i < nbRounds; ++i) { - /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*', - * this will warn "discards 'const' qualifier". 
*/ - union { - const __m512i* cp; - void* p; - } remote_const_void; - remote_const_void.cp = src + i; - dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed); + dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); } } } @@ -3864,7 +5019,7 @@ XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, /* data_key = data_vec ^ key_vec; */ __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); /* data_key_lo = data_key >> 32; */ - __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32); /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); /* xacc[i] += swap(data_vec); */ @@ -3874,6 +5029,7 @@ XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, xacc[i] = _mm256_add_epi64(product, sum); } } } +XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2) XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) @@ -3896,7 +5052,7 @@ XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); /* xacc[i] *= XXH_PRIME32_1; */ - __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32); __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); @@ -3928,12 +5084,12 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTR XXH_ASSERT(((size_t)dest & 31) == 0); /* GCC -O2 need unroll loop manually */ - dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed); - dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed); - dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed); - dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed); - dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed); - dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed); + dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed); + dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed); + dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed); + dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed); + dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed); + dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed); } } @@ -3980,6 +5136,7 @@ XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, xacc[i] = _mm_add_epi64(product, sum); } } } +XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) @@ -4058,14 +5215,28 @@ XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, /*! * @internal - * @brief The bulk processing loop for NEON. + * @brief The bulk processing loop for NEON and WASM SIMD128. * * The NEON code path is actually partially scalar when running on AArch64. This * is to optimize the pipelining and can have up to 15% speedup depending on the * CPU, and it also mitigates some GCC codegen issues. * * @see XXH3_NEON_LANES for configuring this and details about this optimization. 
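All of these vector paths implement the same two per-lane kernels that the scalar routines `XXH3_scalarRound()` and `XXH3_scalarScrambleRound()` spell out. A simplified scalar model of both, for orientation before the NEON code below (not the literal library code; lane bookkeeping trimmed):

```c
/* Accumulate one 8-byte lane: xor with the secret, 32x32->64 multiply-add,
 * and add the raw input to the neighboring (swapped) lane. */
static void accumulate_lane(xxh_u64* acc, const xxh_u8* in,
                            const xxh_u8* sec, size_t lane)
{
    xxh_u64 const data_val = XXH_readLE64(in  + lane * 8);
    xxh_u64 const data_key = data_val ^ XXH_readLE64(sec + lane * 8);
    acc[lane ^ 1] += data_val;  /* swap adjacent lanes */
    acc[lane]     += (data_key & 0xFFFFFFFF) * (data_key >> 32);
}

/* Scramble one lane: xorshift, mix in the secret, multiply by a prime. */
static void scramble_lane(xxh_u64* acc, const xxh_u8* sec, size_t lane)
{
    xxh_u64 v = acc[lane];
    v ^= v >> 47;
    v ^= XXH_readLE64(sec + lane * 8);
    acc[lane] = v * XXH_PRIME32_1;
}
```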
+ * + * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit + * integers instead of the other platforms which mask full 64-bit vectors, + * so the setup is more complicated than just shifting right. + * + * Additionally, there is an optimization for 4 lanes at once noted below. + * + * Since, as stated, the most optimal amount of lanes for Cortexes is 6, + * there needs to be *three* versions of the accumulate operation used + * for the remaining 2 lanes. + * + * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap + * nearly perfectly. */ + XXH_FORCE_INLINE void XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, const void* XXH_RESTRICT input, @@ -4073,101 +5244,182 @@ XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, { XXH_ASSERT((((size_t)acc) & 15) == 0); XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); - { - uint64x2_t* const xacc = (uint64x2_t *) acc; + { /* GCC for darwin arm64 does not like aliasing here */ + xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ - uint8_t const* const xinput = (const uint8_t *) input; - uint8_t const* const xsecret = (const uint8_t *) secret; + uint8_t const* xinput = (const uint8_t *) input; + uint8_t const* xsecret = (const uint8_t *) secret; size_t i; - /* NEON for the first few lanes (these loops are normally interleaved) */ - for (i=0; i < XXH3_NEON_LANES / 2; i++) { +#ifdef __wasm_simd128__ + /* + * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret + * is constant propagated, which results in it converting it to this + * inside the loop: + * + * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) + * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) + * ... + * + * This requires a full 32-bit address immediate (and therefore a 6 byte + * instruction) as well as an add for each offset. + * + * Putting an asm guard prevents it from folding (at the cost of losing + * the alignment hint), and uses the free offset in `v128.load` instead + * of adding secret_offset each time which overall reduces code size by + * about a kilobyte and improves performance. + */ + XXH_COMPILER_GUARD(xsecret); +#endif + /* Scalar lanes use the normal scalarRound routine */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } + i = 0; + /* 4 NEON lanes at a time. 
*/ + for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { /* data_vec = xinput[i]; */ - uint8x16_t data_vec = vld1q_u8(xinput + (i * 16)); + uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); + uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); /* key_vec = xsecret[i]; */ - uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); - uint64x2_t data_key; - uint32x2_t data_key_lo, data_key_hi; - /* xacc[i] += swap(data_vec); */ - uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); - uint64x2_t const swapped = vextq_u64(data64, data64, 1); - xacc[i] = vaddq_u64 (xacc[i], swapped); + uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); + /* data_swap = swap(data_vec) */ + uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); + uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); /* data_key = data_vec ^ key_vec; */ - data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec)); - /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF); - * data_key_hi = (uint32x2_t) (data_key >> 32); - * data_key = UNDEFINED; */ - XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); - /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */ - xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi); + uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); + uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); + /* + * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a + * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to + * get one vector with the low 32 bits of each lane, and one vector + * with the high 32 bits of each lane. + * + * The intrinsic returns a double vector because the original ARMv7-a + * instruction modified both arguments in place. AArch64 and SIMD128 emit + * two instructions from this intrinsic. + * + * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] + * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] + */ + uint32x4x2_t unzipped = vuzpq_u32( + vreinterpretq_u32_u64(data_key_1), + vreinterpretq_u32_u64(data_key_2) + ); + /* data_key_lo = data_key & 0xFFFFFFFF */ + uint32x4_t data_key_lo = unzipped.val[0]; + /* data_key_hi = data_key >> 32 */ + uint32x4_t data_key_hi = unzipped.val[1]; + /* + * Then, we can split the vectors horizontally and multiply which, as for most + * widening intrinsics, have a variant that works on both high half vectors + * for free on AArch64. A similar instruction is available on SIMD128. + * + * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi + */ + uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); + uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); + /* + * Clang reorders + * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s + * c += a; // add acc.2d, acc.2d, swap.2d + * to + * c += a; // add acc.2d, acc.2d, swap.2d + * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s + * + * While it would make sense in theory since the addition is faster, + * for reasons likely related to umlal being limited to certain NEON + * pipelines, this is worse. A compiler guard fixes this. + */ + XXH_COMPILER_GUARD_CLANG_NEON(sum_1); + XXH_COMPILER_GUARD_CLANG_NEON(sum_2); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum_1); + xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); } - /* Scalar for the remainder. This may be a zero iteration loop. 
*/ - for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { - XXH3_scalarRound(acc, input, secret, i); + /* Operate on the remaining NEON lanes 2 at a time. */ + for (; i < XXH3_NEON_LANES / 2; i++) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + /* acc_vec_2 = swap(data_vec) */ + uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* For two lanes, just use VMOVN and VSHRN. */ + /* data_key_lo = data_key & 0xFFFFFFFF; */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* data_key_hi = data_key >> 32; */ + uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); + /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ + uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); + /* Same Clang workaround as before */ + XXH_COMPILER_GUARD_CLANG_NEON(sum); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64 (xacc[i], sum); } } } +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) XXH_FORCE_INLINE void XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 15) == 0); - { uint64x2_t* xacc = (uint64x2_t*) acc; + { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; uint8_t const* xsecret = (uint8_t const*) secret; - uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1); size_t i; - /* NEON for the first few lanes (these loops are normally interleaved) */ + /* WASM uses operator overloads and doesn't need these. */ +#ifndef __wasm_simd128__ + /* { prime32_1, prime32_1 } */ + uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); + /* { 0, prime32_1, 0, prime32_1 } */ + uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); +#endif + + /* AArch64 uses both scalar and neon at the same time */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarScrambleRound(acc, secret, i); + } for (i=0; i < XXH3_NEON_LANES / 2; i++) { /* xacc[i] ^= (xacc[i] >> 47); */ uint64x2_t acc_vec = xacc[i]; - uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47); - uint64x2_t data_vec = veorq_u64 (acc_vec, shifted); + uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); + uint64x2_t data_vec = veorq_u64(acc_vec, shifted); /* xacc[i] ^= xsecret[i]; */ - uint8x16_t key_vec = vld1q_u8 (xsecret + (i * 16)); - uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec)); - + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, key_vec); /* xacc[i] *= XXH_PRIME32_1 */ - uint32x2_t data_key_lo, data_key_hi; - /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF); - * data_key_hi = (uint32x2_t) (xacc[i] >> 32); - * xacc[i] = UNDEFINED; */ - XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); - { /* - * prod_hi = (data_key >> 32) * XXH_PRIME32_1; - * - * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will - * incorrectly "optimize" this: - * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b)); - * shifted = vshll_n_u32(tmp, 32); - * to this: - * tmp = "vmulq_u64"(a, b); // no such thing! - * shifted = vshlq_n_u64(tmp, 32); - * - * However, unlike SSE, Clang lacks a 64-bit multiply routine - * for NEON, and it scalarizes two 64-bit multiplies instead. - * - * vmull_u32 has the same timing as vmul_u32, and it avoids - * this bug completely. 
- * See https://bugs.llvm.org/show_bug.cgi?id=39967 - */ - uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime); - /* xacc[i] = prod_hi << 32; */ - xacc[i] = vshlq_n_u64(prod_hi, 32); - /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */ - xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime); - } - } - /* Scalar for the remainder. This may be a zero iteration loop. */ - for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { - XXH3_scalarScrambleRound(acc, secret, i); +#ifdef __wasm_simd128__ + /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ + xacc[i] = data_key * XXH_PRIME32_1; +#else + /* + * Expanded version with portable NEON intrinsics + * + * lo(x) * lo(y) + (hi(x) * lo(y) << 32) + * + * prod_hi = hi(data_key) * lo(prime) << 32 + * + * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector + * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits + * and avoid the shift. + */ + uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); + /* Extract low bits for vmlal_u32 */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ + xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); +#endif } } } - #endif #if (XXH_VECTOR == XXH_VSX) @@ -4178,23 +5430,23 @@ XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { /* presumed aligned */ - unsigned int* const xacc = (unsigned int*) acc; - xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */ - xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */ + xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ + xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ xxh_u64x2 const v32 = { 32, 32 }; size_t i; for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { /* data_vec = xinput[i]; */ - xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i); + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); /* key_vec = xsecret[i]; */ - xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); xxh_u64x2 const data_key = data_vec ^ key_vec; /* shuffled = (data_key << 32) | (data_key >> 32); */ xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); /* acc_vec = xacc[i]; */ - xxh_u64x2 acc_vec = (xxh_u64x2)vec_xl(0, xacc + 4 * i); + xxh_u64x2 acc_vec = xacc[i]; acc_vec += product; /* swap high and low halves */ @@ -4203,18 +5455,18 @@ XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, #else acc_vec += vec_xxpermdi(data_vec, data_vec, 2); #endif - /* xacc[i] = acc_vec; */ - vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i); + xacc[i] = acc_vec; } } +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) XXH_FORCE_INLINE void XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 15) == 0); - { xxh_u64x2* const xacc = (xxh_u64x2*) acc; - const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret; + { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + const xxh_u8* const xsecret = (const xxh_u8*) secret; /* constants */ xxh_u64x2 const v32 = { 32, 32 }; xxh_u64x2 const v47 = { 47, 47 }; 
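The kPrimeHi reinterpret trick in the NEON scramble above rests on a simple identity: a 64x32-bit product taken mod 2^64 splits into a low 32x32 product plus a shifted high 32x32 product. A scalar model of that identity, assuming nothing beyond C99 (PRIME32_1 below is the documented xxHash constant):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t mul_by_prime(uint64_t x, uint32_t prime)
    {
        uint64_t const lo = (uint32_t)x;   /* low 32 bits */
        uint64_t const hi = x >> 32;       /* high 32 bits */
        /* x * prime (mod 2^64) == lo*prime + ((hi*prime) << 32) */
        return lo * prime + ((hi * prime) << 32);
    }

    int main(void)
    {
        uint32_t const PRIME32_1 = 0x9E3779B1U;
        uint64_t const x = 0x0123456789ABCDEFULL;
        assert(mul_by_prime(x, PRIME32_1) == x * PRIME32_1);
        return 0;
    }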
@@ -4226,7 +5478,7 @@ XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); /* xacc[i] ^= xsecret[i]; */ - xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); xxh_u64x2 const data_key = data_vec ^ key_vec; /* xacc[i] *= XXH_PRIME32_1 */ @@ -4240,8 +5492,148 @@ XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) #endif +#if (XXH_VECTOR == XXH_SVE) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_sve( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc); + ACCRND(vacc, 0); + svst1_u64(mask, xacc, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } +} + +XXH_FORCE_INLINE void +XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes) +{ + if (nbStripes != 0) { + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc + 0); + do { + /* svprfd(svbool_t, void *, enum svfprop); */ + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(vacc, 0); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + xinput += 
8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } + } +} + +#endif + /* scalar variants - universal */ +#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) +/* + * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they + * emit an excess mask and a full 64-bit multiply-add (MADD X-form). + * + * While this might not seem like much, as AArch64 is a 64-bit architecture, only + * big Cortex designs have a full 64-bit multiplier. + * + * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit + * multiplies expand to 2-3 multiplies in microcode. This has a major penalty + * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. + * + * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does + * not have this penalty and does the mask automatically. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + xxh_u64 ret; + /* note: %x = 64-bit register, %w = 32-bit register */ + __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); + return ret; +} +#else +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; +} +#endif + /*! * @internal * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). @@ -4264,7 +5656,7 @@ XXH3_scalarRound(void* XXH_RESTRICT acc, xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ - xacc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32); + xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); } } @@ -4278,10 +5670,18 @@ XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { size_t i; + /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__arm__) || defined(__thumb2__)) \ + && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ + && XXH_SIZE_OPT <= 0 +# pragma GCC unroll 8 +#endif for (i=0; i < XXH_ACC_NB; i++) { XXH3_scalarRound(acc, input, secret, i); } } +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) /*! * @internal @@ -4333,10 +5733,10 @@ XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) const xxh_u8* kSecretPtr = XXH3_kSecret; XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); -#if defined(__clang__) && defined(__aarch64__) +#if defined(__GNUC__) && defined(__aarch64__) /* * UGLY HACK: - * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are + * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are * placed sequentially, in order, at the top of the unrolled loop. 
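The SVE code above dispatches on svcntd(), the number of 64-bit lanes in the hardware vector, and covers the 8-lane accumulator with as many predicated sub-vectors as needed. A simplified sketch of that dispatch shape, assuming a compiler targeting SVE (e.g. -march=armv8-a+sve); the +1 add is only a placeholder for the real ACCRND round:

    #include <arm_sve.h>
    #include <stdint.h>

    void touch_acc_sve(uint64_t xacc[8])
    {
        uint64_t const lanes = svcntd();          /* 64-bit lanes per vector */
        if (lanes >= 8) {                         /* >= 512-bit vectors */
            svbool_t const mask = svptrue_pat_b64(SV_VL8);
            svuint64_t acc = svld1_u64(mask, xacc);
            acc = svadd_n_u64_x(mask, acc, 1);
            svst1_u64(mask, xacc, acc);
        } else if (lanes == 2) {                  /* 128-bit vectors */
            svbool_t const mask = svptrue_pat_b64(SV_VL2);
            int i;
            for (i = 0; i < 8; i += 2) {
                svuint64_t acc = svld1_u64(mask, xacc + i);
                acc = svadd_n_u64_x(mask, acc, 1);
                svst1_u64(mask, xacc + i, acc);
            }
        } else {                                  /* 256-bit vectors */
            svbool_t const mask = svptrue_pat_b64(SV_VL4);
            int i;
            for (i = 0; i < 8; i += 4) {
                svuint64_t acc = svld1_u64(mask, xacc + i);
                acc = svadd_n_u64_x(mask, acc, 1);
                svst1_u64(mask, xacc + i, acc);
            }
        }
    }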
* * While MOVK is great for generating constants (2 cycles for a 64-bit @@ -4351,7 +5751,7 @@ XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) * ADD * SUB STR * STR - * By forcing loads from memory (as the asm line causes Clang to assume + * By forcing loads from memory (as the asm line causes the compiler to assume * that XXH3_kSecretPtr has been changed), the pipelines are used more * efficiently: * I L S @@ -4368,17 +5768,11 @@ XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) */ XXH_COMPILER_GUARD(kSecretPtr); #endif - /* - * Note: in debug mode, this overrides the asm optimization - * and Clang will emit MOVK chains again. - */ - XXH_ASSERT(kSecretPtr == XXH3_kSecret); - { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; int i; for (i=0; i < nbRounds; i++) { /* - * The asm hack causes Clang to assume that kSecretPtr aliases with + * The asm hack causes the compiler to assume that kSecretPtr aliases with * customSecret, and on aarch64, this prevented LDP from merging two * loads together for free. Putting the loads together before the stores * properly generates LDP. @@ -4391,7 +5785,7 @@ XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) } -typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*); +typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); @@ -4399,82 +5793,63 @@ typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); #if (XXH_VECTOR == XXH_AVX512) #define XXH3_accumulate_512 XXH3_accumulate_512_avx512 +#define XXH3_accumulate XXH3_accumulate_avx512 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 #elif (XXH_VECTOR == XXH_AVX2) #define XXH3_accumulate_512 XXH3_accumulate_512_avx2 +#define XXH3_accumulate XXH3_accumulate_avx2 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 #elif (XXH_VECTOR == XXH_SSE2) #define XXH3_accumulate_512 XXH3_accumulate_512_sse2 +#define XXH3_accumulate XXH3_accumulate_sse2 #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 #elif (XXH_VECTOR == XXH_NEON) #define XXH3_accumulate_512 XXH3_accumulate_512_neon +#define XXH3_accumulate XXH3_accumulate_neon #define XXH3_scrambleAcc XXH3_scrambleAcc_neon #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar #elif (XXH_VECTOR == XXH_VSX) #define XXH3_accumulate_512 XXH3_accumulate_512_vsx +#define XXH3_accumulate XXH3_accumulate_vsx #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar +#elif (XXH_VECTOR == XXH_SVE) +#define XXH3_accumulate_512 XXH3_accumulate_512_sve +#define XXH3_accumulate XXH3_accumulate_sve +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #else /* scalar */ #define XXH3_accumulate_512 XXH3_accumulate_512_scalar +#define XXH3_accumulate XXH3_accumulate_scalar #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar #endif - - -#ifndef XXH_PREFETCH_DIST -# ifdef __clang__ -# define XXH_PREFETCH_DIST 320 -# else -# if (XXH_VECTOR == XXH_AVX512) -# define XXH_PREFETCH_DIST 512 -# else -# define XXH_PREFETCH_DIST 384 -# endif -# 
endif /* __clang__ */ -#endif /* XXH_PREFETCH_DIST */ - -/* - * XXH3_accumulate() - * Loops over XXH3_accumulate_512(). - * Assumption: nbStripes will not overflow the secret size - */ -XXH_FORCE_INLINE void -XXH3_accumulate( xxh_u64* XXH_RESTRICT acc, - const xxh_u8* XXH_RESTRICT input, - const xxh_u8* XXH_RESTRICT secret, - size_t nbStripes, - XXH3_f_accumulate_512 f_acc512) -{ - size_t n; - for (n = 0; n < nbStripes; n++ ) { - const xxh_u8* const in = input + n*XXH_STRIPE_LEN; - XXH_PREFETCH(in + XXH_PREFETCH_DIST); - f_acc512(acc, - in, - secret + n*XXH_SECRET_CONSUME_RATE); - } -} +#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ +# undef XXH3_initCustomSecret +# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar +#endif XXH_FORCE_INLINE void XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT input, size_t len, const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate_512 f_acc512, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; @@ -4486,7 +5861,7 @@ XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); for (n = 0; n < nb_blocks; n++) { - XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512); + f_acc(acc, input + n*block_len, secret, nbStripesPerBlock); f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); } @@ -4494,12 +5869,12 @@ XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, XXH_ASSERT(len > XXH_STRIPE_LEN); { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); - XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512); + f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); /* last stripe */ { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; #define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ - f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); + XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); } } } @@ -4544,12 +5919,12 @@ XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secre XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, const void* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate_512 f_acc512, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; - XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble); + XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble); /* converge into final hash */ XXH_STATIC_ASSERT(sizeof(acc) == 64); @@ -4563,13 +5938,15 @@ XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, * It's important for performance to transmit secret's size (when it's static) * so that the compiler can properly optimize the vectorized loop. * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set. + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE + * breaks -Og, this is XXH_NO_INLINE. 
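The long-hash driver that replaces the removed XXH3_accumulate() wrapper has a three-phase shape: whole blocks (accumulate, then scramble), the stripes of the final partial block, and one last stripe read from the end of the input with a shifted secret. A structural sketch with hypothetical names; it assumes len > 64 and mirrors the XXH_STRIPE_LEN / XXH_SECRET_CONSUME_RATE values used above:

    #include <stddef.h>
    #include <stdint.h>

    #define STRIPE_LEN    64
    #define CONSUME_RATE   8   /* secret bytes consumed per stripe */
    #define LASTACC_START  7   /* last-stripe secret offset, as above */

    typedef void (*acc_f)(uint64_t*, const uint8_t*, const uint8_t*, size_t);
    typedef void (*scr_f)(uint64_t*, const uint8_t*);

    static void hash_long_loop(uint64_t* acc,
                               const uint8_t* input, size_t len,
                               const uint8_t* secret, size_t secretSize,
                               acc_f f_acc, scr_f f_scramble)
    {
        size_t const stripesPerBlock = (secretSize - STRIPE_LEN) / CONSUME_RATE;
        size_t const blockLen = STRIPE_LEN * stripesPerBlock;
        size_t const nbBlocks = (len - 1) / blockLen;
        size_t n;
        for (n = 0; n < nbBlocks; n++) {            /* whole blocks */
            f_acc(acc, input + n * blockLen, secret, stripesPerBlock);
            f_scramble(acc, secret + secretSize - STRIPE_LEN);
        }
        {   /* stripes of the last, partial block */
            size_t const nbStripes = ((len - 1) - blockLen * nbBlocks) / STRIPE_LEN;
            f_acc(acc, input + nbBlocks * blockLen, secret, nbStripes);
        }
        /* final stripe: always the last 64 bytes, secret shifted by 7 so it
         * differs from both the block secret and the scrambler secret */
        f_acc(acc, input + len - STRIPE_LEN,
              secret + secretSize - STRIPE_LEN - LASTACC_START, 1);
    }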
*/ -XXH_FORCE_INLINE XXH64_hash_t +XXH3_WITH_SECRET_INLINE XXH64_hash_t XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) { (void)seed64; - return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc); + return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc); } /* @@ -4578,12 +5955,12 @@ XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, * Note that inside this no_inline function, we do inline the internal loop, * and provide a statically defined secret size to allow optimization of vector loop. */ -XXH_NO_INLINE XXH64_hash_t +XXH_NO_INLINE XXH_PUREF XXH64_hash_t XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) { (void)seed64; (void)secret; (void)secretLen; - return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc); + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); } /* @@ -4600,18 +5977,20 @@ XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, XXH64_hash_t seed, - XXH3_f_accumulate_512 f_acc512, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble, XXH3_f_initCustomSecret f_initSec) { +#if XXH_SIZE_OPT <= 0 if (seed == 0) return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), - f_acc512, f_scramble); + f_acc, f_scramble); +#endif { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; f_initSec(secret, seed); return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), - f_acc512, f_scramble); + f_acc, f_scramble); } } @@ -4619,12 +5998,12 @@ XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, * It's important for performance that XXH3_hashLong is not inlined. */ XXH_NO_INLINE XXH64_hash_t -XXH3_hashLong_64b_withSeed(const void* input, size_t len, - XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen) +XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) { (void)secret; (void)secretLen; return XXH3_hashLong_64b_withSeed_internal(input, len, seed, - XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret); + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); } @@ -4656,37 +6035,37 @@ XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, /* === Public entry point === */ -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len) +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length) { - return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); + return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); } -/*! @ingroup xxh3_family */ +/*! 
@ingroup XXH3_family */ XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) +XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize) { - return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); + return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) +XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed) { - return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); } XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed) +XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) { - if (len <= XXH3_MIDSIZE_MAX) - return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); - return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize); + if (length <= XXH3_MIDSIZE_MAX) + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize); } /* === XXH3 streaming === */ - +#ifndef XXH_NO_STREAM /* * Malloc's a pointer that is always aligned to align. * @@ -4710,7 +6089,7 @@ XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, * * Align must be a power of 2 and 8 <= align <= 128. */ -static void* XXH_alignedMalloc(size_t s, size_t align) +static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align) { XXH_ASSERT(align <= 128 && align >= 8); /* range check */ XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */ @@ -4752,7 +6131,15 @@ static void XXH_alignedFree(void* p) XXH_free(base); } } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ +/*! + * @brief Allocate an @ref XXH3_state_t. + * + * @return An allocated pointer of @ref XXH3_state_t on success. + * @return `NULL` on failure. + * + * @note Must be freed with XXH3_freeState(). + */ XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) { XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); @@ -4761,16 +6148,25 @@ XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) return state; } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ +/*! + * @brief Frees an @ref XXH3_state_t. + * + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * + * @return @ref XXH_OK. + * + * @note Must be allocated with XXH3_createState(). + */ XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) { XXH_alignedFree(statePtr); return XXH_OK; } -/*! @ingroup xxh3_family */ +/*! 
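XXH_alignedMalloc above follows a classic recipe: over-allocate by the alignment, round the base pointer up, and stash the distance back to the base in the byte just before the returned pointer. A standalone sketch of the same idea, assuming align is a power of two no larger than 255 (so the offset fits in one byte):

    #include <stdint.h>
    #include <stdlib.h>

    static void* aligned_malloc(size_t size, size_t align)
    {
        uint8_t* const base = (uint8_t*)malloc(size + align);
        if (base == NULL) return NULL;
        {   size_t const offset = align - ((size_t)base & (align - 1)); /* 1..align */
            uint8_t* const ptr = base + offset;      /* now offset-aligned */
            ptr[-1] = (uint8_t)offset;               /* remember the skip */
            return ptr;
        }
    }

    static void aligned_free(void* p)
    {
        if (p != NULL) {
            uint8_t* const ptr = (uint8_t*)p;
            free(ptr - ptr[-1]);                     /* recover the base */
        }
    }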
@ingroup XXH3_family */ XXH_PUBLIC_API void -XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state) +XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state) { XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); } @@ -4802,18 +6198,18 @@ XXH3_reset_internal(XXH3_state_t* statePtr, statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset(XXH3_state_t* statePtr) +XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) { if (statePtr == NULL) return XXH_ERROR; XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); return XXH_OK; } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) +XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) { if (statePtr == NULL) return XXH_ERROR; XXH3_reset_internal(statePtr, 0, secret, secretSize); @@ -4822,9 +6218,9 @@ XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t return XXH_OK; } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) +XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) { if (statePtr == NULL) return XXH_ERROR; if (seed==0) return XXH3_64bits_reset(statePtr); @@ -4834,9 +6230,9 @@ XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) return XXH_OK; } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64) +XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64) { if (statePtr == NULL) return XXH_ERROR; if (secret == NULL) return XXH_ERROR; @@ -4846,35 +6242,61 @@ XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, return XXH_OK; } -/* Note : when XXH3_consumeStripes() is invoked, - * there must be a guarantee that at least one more byte must be consumed from input - * so that the function can blindly consume all stripes using the "normal" secret segment */ -XXH_FORCE_INLINE void +/*! + * @internal + * @brief Processes a large input for XXH3_update() and XXH3_digest_long(). + * + * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block. 
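For reference, the streaming entry points annotated above compose as follows; this is ordinary public-API usage with a fixed seed, assuming xxhash.h is on the include path:

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        XXH3_state_t* const state = XXH3_createState();
        if (state == NULL) return 1;
        if (XXH3_64bits_reset_withSeed(state, 42) != XXH_OK) return 1;
        {   const char part1[] = "hello ";
            const char part2[] = "world";
            XXH3_64bits_update(state, part1, sizeof(part1) - 1);
            XXH3_64bits_update(state, part2, sizeof(part2) - 1);
        }
        printf("%016llx\n", (unsigned long long)XXH3_64bits_digest(state));
        XXH3_freeState(state);
        return 0;
    }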
+ * + * @param acc Pointer to the 8 accumulator lanes + * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block* + * @param nbStripesPerBlock Number of stripes in a block + * @param input Input pointer + * @param nbStripes Number of stripes to process + * @param secret Secret pointer + * @param secretLimit Offset of the last block in @p secret + * @param f_acc Pointer to an XXH3_accumulate implementation + * @param f_scramble Pointer to an XXH3_scrambleAcc implementation + * @return Pointer past the end of @p input after processing + */ +XXH_FORCE_INLINE const xxh_u8 * XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock, const xxh_u8* XXH_RESTRICT input, size_t nbStripes, const xxh_u8* XXH_RESTRICT secret, size_t secretLimit, - XXH3_f_accumulate_512 f_acc512, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { - XXH_ASSERT(nbStripes <= nbStripesPerBlock); /* can handle max 1 scramble per invocation */ - XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock); - if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) { - /* need a scrambling operation */ - size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr; - size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock; - XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512); - f_scramble(acc, secret + secretLimit); - XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512); - *nbStripesSoFarPtr = nbStripesAfterBlock; - } else { - XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512); + const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE; + /* Process full blocks */ + if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) { + /* Process the initial partial block... */ + size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr; + + do { + /* Accumulate and scramble */ + f_acc(acc, input, initialSecret, nbStripesThisIter); + f_scramble(acc, secret + secretLimit); + input += nbStripesThisIter * XXH_STRIPE_LEN; + nbStripes -= nbStripesThisIter; + /* Then continue the loop with the full block size */ + nbStripesThisIter = nbStripesPerBlock; + initialSecret = secret; + } while (nbStripes >= nbStripesPerBlock); + *nbStripesSoFarPtr = 0; + } + /* Process a partial block */ + if (nbStripes > 0) { + f_acc(acc, input, initialSecret, nbStripes); + input += nbStripes * XXH_STRIPE_LEN; *nbStripesSoFarPtr += nbStripes; } + /* Return end pointer */ + return input; } #ifndef XXH3_STREAM_USE_STACK -# ifndef __clang__ /* clang doesn't need additional stack space */ +# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */ # define XXH3_STREAM_USE_STACK 1 # endif #endif @@ -4884,7 +6306,7 @@ XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, XXH_FORCE_INLINE XXH_errorcode XXH3_update(XXH3_state_t* XXH_RESTRICT const state, const xxh_u8* XXH_RESTRICT input, size_t len, - XXH3_f_accumulate_512 f_acc512, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { if (input==NULL) { @@ -4900,7 +6322,8 @@ XXH3_update(XXH3_state_t* XXH_RESTRICT const state, * when operating accumulators directly into state. * Operating into stack space seems to enable proper optimization. 
* clang, on the other hand, doesn't seem to need this trick */ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc)); + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; + XXH_memcpy(acc, state->acc, sizeof(acc)); #else xxh_u64* XXH_RESTRICT const acc = state->acc; #endif @@ -4908,7 +6331,7 @@ XXH3_update(XXH3_state_t* XXH_RESTRICT const state, XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); /* small input : just fill in tmp buffer */ - if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) { + if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { XXH_memcpy(state->buffer + state->bufferedSize, input, len); state->bufferedSize += (XXH32_hash_t)len; return XXH_OK; } @@ -4930,57 +6353,20 @@ XXH3_update(XXH3_state_t* XXH_RESTRICT const state, &state->nbStripesSoFar, state->nbStripesPerBlock, state->buffer, XXH3_INTERNALBUFFER_STRIPES, secret, state->secretLimit, - f_acc512, f_scramble); + f_acc, f_scramble); state->bufferedSize = 0; } XXH_ASSERT(input < bEnd); - - /* large input to consume : ingest per full block */ - if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) { + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; - XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar); - /* join to current block's end */ - { size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar; - XXH_ASSERT(nbStripesToEnd <= nbStripes); - XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512); - f_scramble(acc, secret + state->secretLimit); - state->nbStripesSoFar = 0; - input += nbStripesToEnd * XXH_STRIPE_LEN; - nbStripes -= nbStripesToEnd; - } - /* consume per entire blocks */ - while(nbStripes >= state->nbStripesPerBlock) { - XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512); - f_scramble(acc, secret + state->secretLimit); - input += state->nbStripesPerBlock * XXH_STRIPE_LEN; - nbStripes -= state->nbStripesPerBlock; - } - /* consume last partial block */ - XXH3_accumulate(acc, input, secret, nbStripes, f_acc512); - input += nbStripes * XXH_STRIPE_LEN; - XXH_ASSERT(input < bEnd); /* at least some bytes left */ - state->nbStripesSoFar = nbStripes; - /* buffer predecessor of last partial stripe */ - XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); - XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN); - } else { - /* content to consume <= block size */ - /* Consume input by a multiple of internal buffer size */ - if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { - const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE; - do { - XXH3_consumeStripes(acc, + input = XXH3_consumeStripes(acc, &state->nbStripesSoFar, state->nbStripesPerBlock, - input, XXH3_INTERNALBUFFER_STRIPES, - secret, state->secretLimit, - f_acc512, f_scramble); - input += XXH3_INTERNALBUFFER_SIZE; - } while (input<limit); - /* for last partial stripe */ - XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); - } - } + input, nbStripes, + secret, state->secretLimit, + f_acc, f_scramble); + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + } /* Some remaining input (always) : buffer it */ XXH_ASSERT(input < bEnd); XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); @@ -4989,19 +6375,19 @@ XXH3_update(XXH3_state_t* XXH_RESTRICT const state, state->bufferedSize = (XXH32_hash_t)(bEnd-input); #if 
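The rewritten buffered-size test in XXH3_update above is an overflow-hardening pattern: given the invariant buffered <= CAP, the form `len <= CAP - buffered` can never wrap, while the old `buffered + len <= CAP` could for very large len. A tiny model, where CAP is an arbitrary stand-in for XXH3_INTERNALBUFFER_SIZE:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define CAP ((size_t)256)

    static int fits(size_t buffered, size_t len)
    {
        assert(buffered <= CAP);       /* invariant held by the caller */
        return len <= CAP - buffered;  /* subtraction cannot wrap */
    }

    int main(void)
    {
        assert(fits(200, 56));
        assert(!fits(200, 57));
        /* old form: 200 + SIZE_MAX wraps to 199 <= 256, a false positive */
        assert(!fits(200, SIZE_MAX));
        return 0;
    }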
defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 /* save stack accumulators into state */ - memcpy(state->acc, acc, sizeof(acc)); + XXH_memcpy(state->acc, acc, sizeof(acc)); #endif } return XXH_OK; } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len) +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) { return XXH3_update(state, (const xxh_u8*)input, len, - XXH3_accumulate_512, XXH3_scrambleAcc); + XXH3_accumulate, XXH3_scrambleAcc); } @@ -5010,37 +6396,40 @@ XXH3_digest_long (XXH64_hash_t* acc, const XXH3_state_t* state, const unsigned char* secret) { + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + const xxh_u8* lastStripePtr; + /* * Digest on a local copy. This way, the state remains unaltered, and it can * continue ingesting more input afterwards. */ XXH_memcpy(acc, state->acc, sizeof(state->acc)); if (state->bufferedSize >= XXH_STRIPE_LEN) { + /* Consume remaining stripes then point to remaining data in buffer */ size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; size_t nbStripesSoFar = state->nbStripesSoFar; XXH3_consumeStripes(acc, &nbStripesSoFar, state->nbStripesPerBlock, state->buffer, nbStripes, secret, state->secretLimit, - XXH3_accumulate_512, XXH3_scrambleAcc); - /* last stripe */ - XXH3_accumulate_512(acc, - state->buffer + state->bufferedSize - XXH_STRIPE_LEN, - secret + state->secretLimit - XXH_SECRET_LASTACC_START); + XXH3_accumulate, XXH3_scrambleAcc); + lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; } else { /* bufferedSize < XXH_STRIPE_LEN */ - xxh_u8 lastStripe[XXH_STRIPE_LEN]; + /* Copy to temp buffer */ size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); - XXH3_accumulate_512(acc, - lastStripe, - secret + state->secretLimit - XXH_SECRET_LASTACC_START); + lastStripePtr = lastStripe; } + /* Last stripe */ + XXH3_accumulate_512(acc, + lastStripePtr, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state) +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) { const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; if (state->totalLen > XXH3_MIDSIZE_MAX) { @@ -5056,7 +6445,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state) return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), secret, state->secretLimit + XXH_STRIPE_LEN); } - +#endif /* !XXH_NO_STREAM */ /* ========================================== @@ -5076,7 +6465,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state) * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). */ -XXH_FORCE_INLINE XXH128_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) { /* A doubled version of 1to3_64b with different constants. 
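The digest path above rebuilds the final 64-byte stripe when fewer than 64 bytes are buffered: the tail of the previous data, which the update path always keeps at the end of the internal buffer, supplies the missing leading bytes. A sketch with illustrative sizes (BUF_SIZE stands in for XXH3_INTERNALBUFFER_SIZE):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define STRIPE_LEN 64
    #define BUF_SIZE  256

    /* bufferedSize is in 1..63 on this path, as asserted above. */
    static void build_last_stripe(uint8_t out[STRIPE_LEN],
                                  const uint8_t buffer[BUF_SIZE],
                                  size_t bufferedSize)
    {
        size_t const catchup = STRIPE_LEN - bufferedSize;
        memcpy(out, buffer + BUF_SIZE - catchup, catchup);  /* old tail bytes */
        memcpy(out + catchup, buffer, bufferedSize);        /* newest bytes   */
    }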
*/ @@ -5105,7 +6494,7 @@ XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_ } } -XXH_FORCE_INLINE XXH128_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) { XXH_ASSERT(input != NULL); @@ -5125,14 +6514,14 @@ XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_ m128.low64 ^= (m128.high64 >> 3); m128.low64 = XXH_xorshift64(m128.low64, 35); - m128.low64 *= 0x9FB21C651E98DF25ULL; + m128.low64 *= PRIME_MX2; m128.low64 = XXH_xorshift64(m128.low64, 28); m128.high64 = XXH3_avalanche(m128.high64); return m128; } } -XXH_FORCE_INLINE XXH128_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) { XXH_ASSERT(input != NULL); @@ -5207,7 +6596,7 @@ XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64 /* * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN */ -XXH_FORCE_INLINE XXH128_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) { XXH_ASSERT(len <= 16); @@ -5238,7 +6627,7 @@ XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, } -XXH_FORCE_INLINE XXH128_hash_t +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, const xxh_u8* XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) @@ -5249,6 +6638,16 @@ XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, { XXH128_hash_t acc; acc.low64 = len * XXH_PRIME64_1; acc.high64 = 0; + +#if XXH_SIZE_OPT >= 1 + { + /* Smaller, but slightly slower. */ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed); + } while (i-- != 0); + } +#else if (len > 32) { if (len > 64) { if (len > 96) { @@ -5259,6 +6658,7 @@ XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); } acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); +#endif { XXH128_hash_t h128; h128.low64 = acc.low64 + acc.high64; h128.high64 = (acc.low64 * XXH_PRIME64_1) @@ -5271,7 +6671,7 @@ XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, } } -XXH_NO_INLINE XXH128_hash_t +XXH_NO_INLINE XXH_PUREF XXH128_hash_t XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, const xxh_u8* XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) @@ -5280,25 +6680,34 @@ XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); { XXH128_hash_t acc; - int const nbRounds = (int)len / 32; - int i; + unsigned i; acc.low64 = len * XXH_PRIME64_1; acc.high64 = 0; - for (i=0; i<4; i++) { + /* + * We set as `i` as offset + 32. We do this so that unchanged + * `len` can be used as upper bound. This reaches a sweet spot + * where both x86 and aarch64 get simple agen and good codegen + * for the loop. + */ + for (i = 32; i < 160; i += 32) { acc = XXH128_mix32B(acc, - input + (32 * i), - input + (32 * i) + 16, - secret + (32 * i), + input + i - 32, + input + i - 16, + secret + i - 32, seed); } acc.low64 = XXH3_avalanche(acc.low64); acc.high64 = XXH3_avalanche(acc.high64); - XXH_ASSERT(nbRounds >= 4); - for (i=4 ; i < nbRounds; i++) { + /* + * NB: `i <= len` will duplicate the last 32-bytes if + * len % 32 was zero. 
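The rewritten mid-size loop above replaces the round counter with an offset-plus-32 counter so the unchanged `len` can serve directly as the second loop's bound. Both index schemes visit the same offsets, which a few lines can verify:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t a[4], b[4], i, n = 0;
        for (i = 0; i < 4; i++)        a[i] = 32 * i;   /* old indexing */
        for (i = 32; i < 160; i += 32) b[n++] = i - 32; /* new indexing */
        for (i = 0; i < 4; i++) assert(a[i] == b[i]);
        return 0;
    }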
This is an unfortunate necessity to keep + * the hash result stable. + */ + for (i=160; i <= len; i += 32) { acc = XXH128_mix32B(acc, - input + (32 * i), - input + (32 * i) + 16, - secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)), + input + i - 32, + input + i - 16, + secret + XXH3_MIDSIZE_STARTOFFSET + i - 160, seed); } /* last bytes */ @@ -5306,7 +6715,7 @@ XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, input + len - 16, input + len - 32, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, - 0ULL - seed); + (XXH64_hash_t)0 - seed); { XXH128_hash_t h128; h128.low64 = acc.low64 + acc.high64; @@ -5323,12 +6732,12 @@ XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate_512 f_acc512, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; - XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble); + XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble); /* converge into final hash */ XXH_STATIC_ASSERT(sizeof(acc) == 64); @@ -5346,47 +6755,50 @@ XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, } /* - * It's important for performance that XXH3_hashLong is not inlined. + * It's important for performance that XXH3_hashLong() is not inlined. */ -XXH_NO_INLINE XXH128_hash_t +XXH_NO_INLINE XXH_PUREF XXH128_hash_t XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) { (void)seed64; (void)secret; (void)secretLen; return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), - XXH3_accumulate_512, XXH3_scrambleAcc); + XXH3_accumulate, XXH3_scrambleAcc); } /* - * It's important for performance to pass @secretLen (when it's static) + * It's important for performance to pass @p secretLen (when it's static) * to the compiler, so that it can properly optimize the vectorized loop. + * + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE + * breaks -Og, this is XXH_NO_INLINE. 
*/ -XXH_FORCE_INLINE XXH128_hash_t +XXH3_WITH_SECRET_INLINE XXH128_hash_t XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) { (void)seed64; return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, - XXH3_accumulate_512, XXH3_scrambleAcc); + XXH3_accumulate, XXH3_scrambleAcc); } XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, - XXH3_f_accumulate_512 f_acc512, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble, XXH3_f_initCustomSecret f_initSec) { if (seed64 == 0) return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), - f_acc512, f_scramble); + f_acc, f_scramble); { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; f_initSec(secret, seed64); return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), - f_acc512, f_scramble); + f_acc, f_scramble); } } @@ -5399,7 +6811,7 @@ XXH3_hashLong_128b_withSeed(const void* input, size_t len, { (void)secret; (void)secretLen; return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, - XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret); + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); } typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, @@ -5429,94 +6841,93 @@ XXH3_128bits_internal(const void* input, size_t len, /* === Public XXH128 API === */ -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len) +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len) { return XXH3_128bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_128b_default); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) +XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize) { return XXH3_128bits_internal(input, len, 0, (const xxh_u8*)secret, secretSize, XXH3_hashLong_128b_withSecret); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) +XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) { return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_128b_withSeed); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed) +XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) { if (len <= XXH3_MIDSIZE_MAX) return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); } -/*! @ingroup xxh3_family */ +/*! 
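A quick usage sketch of the 128-bit one-shot entry points above (XXH128 is, as the code shows, just XXH3_128bits_withSeed); assumes xxhash.h is on the include path:

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        const char msg[] = "xxh3";
        XXH128_hash_t const h1 = XXH3_128bits_withSeed(msg, sizeof(msg) - 1, 7);
        XXH128_hash_t const h2 = XXH128(msg, sizeof(msg) - 1, 7);  /* same */
        printf("equal: %d\n", XXH128_isEqual(h1, h2));             /* prints 1 */
        printf("%016llx%016llx\n",
               (unsigned long long)h1.high64, (unsigned long long)h1.low64);
        return 0;
    }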
@ingroup XXH3_family */ XXH_PUBLIC_API XXH128_hash_t -XXH128(const void* input, size_t len, XXH64_hash_t seed) +XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) { return XXH3_128bits_withSeed(input, len, seed); } /* === XXH3 128-bit streaming === */ - +#ifndef XXH_NO_STREAM /* * All initialization and update functions are identical to 64-bit streaming variant. * The only difference is the finalization routine. */ -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset(XXH3_state_t* statePtr) +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) { return XXH3_64bits_reset(statePtr); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) +XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) { return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) +XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) { return XXH3_64bits_reset_withSeed(statePtr, seed); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed) +XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) { return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len) +XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) { - return XXH3_update(state, (const xxh_u8*)input, len, - XXH3_accumulate_512, XXH3_scrambleAcc); + return XXH3_64bits_update(state, input, len); } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state) +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state) { const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; if (state->totalLen > XXH3_MIDSIZE_MAX) { @@ -5540,13 +6951,11 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state) return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen), secret, state->secretLimit + XXH_STRIPE_LEN); } - +#endif /* !XXH_NO_STREAM */ /* 128-bit utility functions */ -#include /* memcmp, memcpy */ - /* return : 1 is equal, 0 if different */ -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) { /* note : XXH128_hash_t is compact, it has no padding byte */ @@ -5554,11 +6963,11 @@ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) } /* This prototype is compatible with stdlib's qsort(). - * return : >0 if *h128_1 > *h128_2 - * <0 if *h128_1 < *h128_2 - * =0 if *h128_1 == *h128_2 */ -/*! 
@ingroup xxh3_family */ -XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2) + * @return : >0 if *h128_1 > *h128_2 + * <0 if *h128_1 < *h128_2 + * =0 if *h128_1 == *h128_2 */ +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2) { XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2; @@ -5570,9 +6979,9 @@ XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2) /*====== Canonical representation ======*/ -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API void -XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash) +XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) { @@ -5583,9 +6992,9 @@ XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash) XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH128_hash_t -XXH128_hashFromCanonical(const XXH128_canonical_t* src) +XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src) { XXH128_hash_t h; h.high64 = XXH_readBE64(src); @@ -5607,9 +7016,9 @@ XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128) XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 ); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize) +XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize) { #if (XXH_DEBUGLEVEL >= 1) XXH_ASSERT(secretBuffer != NULL); @@ -5652,9 +7061,9 @@ XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSee return XXH_OK; } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API void -XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed) +XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed) { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; XXH3_initCustomSecret(secret, seed); @@ -5667,20 +7076,19 @@ XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed) /* Pop our optimization override from above */ #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ - && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */ + && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ # pragma GCC pop_options #endif -#endif /* XXH_NO_LONG_LONG */ +#if defined (__cplusplus) +} /* extern "C" */ +#endif + +#endif /* XXH_NO_LONG_LONG */ #endif /* XXH_NO_XXH3 */ /*! * @} */ #endif /* XXH_IMPLEMENTATION */ - - -#if defined (__cplusplus) -} -#endif diff --git a/lib/common/zstd_common.c b/lib/common/zstd_common.c index 3d7e35b309b..98542061870 100644 --- a/lib/common/zstd_common.c +++ b/lib/common/zstd_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
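The canonical helpers above fix a big-endian byte layout so serialized hashes are portable across hosts. A small round-trip sketch, again assuming xxhash.h:

    #include <string.h>
    #include "xxhash.h"

    void roundtrip(XXH128_hash_t h, unsigned char out[16])
    {
        XXH128_canonical_t canon;
        XXH128_canonicalFromHash(&canon, h);
        memcpy(out, canon.digest, sizeof(canon.digest));  /* high64 first */
        {   XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
            (void)back;  /* back.low64 == h.low64 && back.high64 == h.high64 */
        }
    }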
* * This source code is licensed under both the BSD-style license (found in the @@ -14,7 +14,6 @@ * Dependencies ***************************************/ #define ZSTD_DEPS_NEED_MALLOC -#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */ #include "error_private.h" #include "zstd_internal.h" @@ -48,36 +47,11 @@ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } * provides error code string from enum */ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } - - -/*=************************************************************** -* Custom allocator -****************************************************************/ -void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem) -{ - if (customMem.customAlloc) - return customMem.customAlloc(customMem.opaque, size); - return ZSTD_malloc(size); -} - -void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem) -{ - if (customMem.customAlloc) { - /* calloc implemented as malloc+memset; - * not as efficient as calloc, but next best guess for custom malloc */ - void* const ptr = customMem.customAlloc(customMem.opaque, size); - ZSTD_memset(ptr, 0, size); - return ptr; - } - return ZSTD_calloc(1, size); -} - -void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) +int ZSTD_isDeterministicBuild(void) { - if (ptr!=NULL) { - if (customMem.customFree) - customMem.customFree(customMem.opaque, ptr); - else - ZSTD_free(ptr); - } +#if ZSTD_IS_DETERMINISTIC_BUILD + return 1; +#else + return 0; +#endif } diff --git a/lib/common/zstd_deps.h b/lib/common/zstd_deps.h index 14211344a02..8a9c7cc5313 100644 --- a/lib/common/zstd_deps.h +++ b/lib/common/zstd_deps.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -24,6 +24,18 @@ #ifndef ZSTD_DEPS_COMMON #define ZSTD_DEPS_COMMON +/* Even though we use qsort_r only for the dictionary builder, the macro + * _GNU_SOURCE has to be declared *before* the inclusion of any standard + * header and the script 'combine.sh' combines the whole zstd source code + * in a single file. + */ +#if defined(__linux) || defined(__linux__) || defined(linux) || defined(__gnu_linux__) || \ + defined(__CYGWIN__) || defined(__MSYS__) +#if !defined(_GNU_SOURCE) && !defined(__ANDROID__) /* NDK doesn't ship qsort_r(). */ +#define _GNU_SOURCE +#endif +#endif + #include <limits.h> #include <stddef.h> #include <string.h> diff --git a/lib/common/zstd_internal.h b/lib/common/zstd_internal.h index e8922670283..86a0fc5c809 100644 --- a/lib/common/zstd_internal.h +++ b/lib/common/zstd_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
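The _GNU_SOURCE hunk above exists because glibc hides qsort_r() unless the macro is defined before the first standard header is included. A minimal sketch of the GNU-flavored call (note that the BSD and MSVC variants order their parameters differently):

    #define _GNU_SOURCE   /* must precede the first standard header */
    #include <stdlib.h>

    static int cmp_dir(const void* a, const void* b, void* arg)
    {
        int const x = *(const int*)a, y = *(const int*)b;
        int const dir = *(const int*)arg;   /* +1 ascending, -1 descending */
        return dir * ((x > y) - (x < y));   /* overflow-safe three-way compare */
    }

    void sort_ints(int* v, size_t n, int direction)
    {
        qsort_r(v, n, sizeof *v, cmp_dir, &direction);
    }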
* * This source code is licensed under both the BSD-style license (found in the @@ -28,7 +28,6 @@ #include "../zstd.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" -#define HUF_STATIC_LINKING_ONLY #include "huf.h" #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ @@ -40,10 +39,6 @@ # define ZSTD_TRACE 0 #endif -#if defined (__cplusplus) -extern "C" { -#endif - /* ---- static assert (debug) --- */ #define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) #define ZSTD_isError ERR_isError /* for inlining */ @@ -94,8 +89,9 @@ typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */ +#define MIN_LITERALS_FOR_4_STREAMS 6 -typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; +typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e; #define LONGNBSEQ 0x7F00 @@ -113,6 +109,8 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy #define LLFSELog 9 #define OffFSELog 8 #define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog) +#define MaxMLBits 16 +#define MaxLLBits 16 #define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */ /* Each table cannot take more than #symbols * FSELog bits */ @@ -170,13 +168,13 @@ static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; * Shared functions to include for inlining *********************************************/ static void ZSTD_copy8(void* dst, const void* src) { -#if defined(ZSTD_ARCH_ARM_NEON) +#if defined(ZSTD_ARCH_ARM_NEON) && !defined(__aarch64__) vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src)); #else ZSTD_memcpy(dst, src, 8); #endif } -#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; } +#define COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0) /* Need to use memmove here since the literal buffer can now be located within the dst buffer. In circumstances where the op "catches up" to where the @@ -187,6 +185,8 @@ static void ZSTD_copy16(void* dst, const void* src) { vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src)); #elif defined(ZSTD_ARCH_X86_SSE2) _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src)); +#elif defined(ZSTD_ARCH_RISCV_RVV) + __riscv_vse8_v_u8m1((uint8_t*)dst, __riscv_vle8_v_u8m1((const uint8_t*)src, 16), 16); #elif defined(__clang__) ZSTD_memmove(dst, src, 16); #else @@ -196,7 +196,7 @@ static void ZSTD_copy16(void* dst, const void* src) { ZSTD_memcpy(dst, copy16_buf, 16); #endif } -#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; } +#define COPY16(d,s) do { ZSTD_copy16(d,s); d+=16; s+=16; } while (0) #define WILDCOPY_OVERLENGTH 32 #define WILDCOPY_VECLEN 16 @@ -215,7 +215,7 @@ typedef enum { * The src buffer must be before the dst buffer. */ MEM_STATIC FORCE_INLINE_ATTR -void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype) +void ZSTD_wildcopy(void* dst, const void* src, size_t length, ZSTD_overlap_e const ovtype) { ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src; const BYTE* ip = (const BYTE*)src; @@ -225,7 +225,7 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { /* Handle short offset copies. 
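The COPY8/COPY16 change above is classic macro hygiene: wrapping a multi-statement macro in do { ... } while (0) turns it into a single statement that survives if/else without braces. A self-contained illustration of the failure mode being prevented:

    #include <string.h>

    /* brace-block form: breaks when followed by `else` */
    #define COPY8_BAD(d,s)  { memcpy(d,s,8); (d)+=8; (s)+=8; }
    /* do/while(0) form: always exactly one statement */
    #define COPY8_GOOD(d,s) do { memcpy(d,s,8); (d)+=8; (s)+=8; } while (0)

    void demo(char* dst, const char* src, int fast)
    {
        if (fast)
            COPY8_GOOD(dst, src);   /* expands to one statement; the else still binds */
        else
            memcpy(dst, src, 4);
        /* Using COPY8_BAD in the if-branch would not compile here:
         * the trailing `;` after `{...}` terminates the if before the else. */
    }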
*/ do { - COPY8(op, ip) + COPY8(op, ip); } while (op < oend); } else { assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); @@ -276,62 +276,6 @@ typedef enum { /*-******************************************* * Private declarations *********************************************/ -typedef struct seqDef_s { - U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ - U16 litLength; - U16 mlBase; /* mlBase == matchLength - MINMATCH */ -} seqDef; - -/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */ -typedef enum { - ZSTD_llt_none = 0, /* no longLengthType */ - ZSTD_llt_literalLength = 1, /* represents a long literal */ - ZSTD_llt_matchLength = 2 /* represents a long match */ -} ZSTD_longLengthType_e; - -typedef struct { - seqDef* sequencesStart; - seqDef* sequences; /* ptr to end of sequences */ - BYTE* litStart; - BYTE* lit; /* ptr to end of literals */ - BYTE* llCode; - BYTE* mlCode; - BYTE* ofCode; - size_t maxNbSeq; - size_t maxNbLit; - - /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength - * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment - * the existing value of the litLength or matchLength by 0x10000. - */ - ZSTD_longLengthType_e longLengthType; - U32 longLengthPos; /* Index of the sequence to apply long length modification to */ -} seqStore_t; - -typedef struct { - U32 litLength; - U32 matchLength; -} ZSTD_sequenceLength; - -/** - * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences - * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. - */ -MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq) -{ - ZSTD_sequenceLength seqLen; - seqLen.litLength = seq->litLength; - seqLen.matchLength = seq->mlBase + MINMATCH; - if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { - if (seqStore->longLengthType == ZSTD_llt_literalLength) { - seqLen.litLength += 0x10000; - } - if (seqStore->longLengthType == ZSTD_llt_matchLength) { - seqLen.matchLength += 0x10000; - } - } - return seqLen; -} /** * Contains the compressed frame size and an upper-bound for the decompressed frame size. @@ -340,19 +284,11 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` */ typedef struct { + size_t nbBlocks; size_t compressedSize; unsigned long long decompressedBound; } ZSTD_frameSizeInfo; /* decompress & legacy */ -const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ - -/* custom memory allocation functions */ -void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem); -void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem); -void ZSTD_customFree(void* ptr, ZSTD_customMem customMem); - - /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; @@ -368,13 +304,13 @@ typedef struct { /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ -/* Used by: decompress, fullbench (does not get its definition from here) */ +/* Used by: decompress, fullbench */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr); /*! 
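The seqDef/seqStore definitions deleted above move elsewhere in this refactor; their key trick is that litLength and mlBase are stored in 16 bits, with at most one sequence per block allowed to exceed that range via a +0x10000 escape recorded in (longLengthType, longLengthPos). A standalone sketch of that decode arithmetic, using hypothetical stand-in types rather than the internal ones:

    #include <stdint.h>

    #define MINMATCH 3

    typedef enum { llt_none, llt_literalLength, llt_matchLength } llt_e;

    /* mirrors the removed ZSTD_getSequenceLength() logic */
    static void decode_lengths(uint16_t litLength, uint16_t mlBase,
                               int seqIsLongPos, llt_e longType,
                               uint32_t* litOut, uint32_t* matchOut)
    {
        uint32_t ll = litLength;
        uint32_t ml = (uint32_t)mlBase + MINMATCH;  /* mlBase == matchLength - MINMATCH */
        if (seqIsLongPos && longType == llt_literalLength) ll += 0x10000;
        if (seqIsLongPos && longType == llt_matchLength)   ml += 0x10000;
        *litOut = ll;
        *matchOut = ml;
    }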
ZSTD_decodeSeqHeaders() : * decode sequence header from src */ -/* Used by: decompress, fullbench (does not get its definition from here) */ +/* Used by: zstd_decompress_block, fullbench */ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize); @@ -387,8 +323,4 @@ MEM_STATIC int ZSTD_cpuSupportsBmi2(void) return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); } -#if defined (__cplusplus) -} -#endif - #endif /* ZSTD_CCOMMON_H_MODULE */ diff --git a/lib/common/zstd_trace.h b/lib/common/zstd_trace.h index 6215f1e70cf..d8eec100758 100644 --- a/lib/common/zstd_trace.h +++ b/lib/common/zstd_trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,23 +11,20 @@ #ifndef ZSTD_TRACE_H #define ZSTD_TRACE_H -#if defined (__cplusplus) -extern "C" { -#endif - #include <stddef.h> /* weak symbol support * For now, enable conservatively: * - Only GNUC * - Only ELF - * - Only x86-64, i386 and aarch64 + * - Only x86-64, i386, aarch64 and risc-v. * Also, explicitly disable on platforms known not to work so they aren't * forgotten in the future. */ #if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \ defined(__GNUC__) && defined(__ELF__) && \ - (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) || defined(__aarch64__)) && \ + (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \ + defined(_M_IX86) || defined(__aarch64__) || defined(__riscv)) && \ !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \ !defined(__CYGWIN__) && !defined(_AIX) # define ZSTD_HAVE_WEAK_SYMBOLS 1 @@ -64,7 +61,7 @@ typedef struct { /** * Non-zero if streaming (de)compression is used. */ - unsigned streaming; + int streaming; /** * The dictionary ID. */ @@ -73,7 +70,7 @@ typedef struct { * Is the dictionary cold? * Only set on decompression. */ - unsigned dictionaryIsCold; + int dictionaryIsCold; /** * The dictionary size or zero if no dictionary. */ @@ -156,8 +153,4 @@ ZSTD_WEAK_ATTR void ZSTD_trace_decompress_end( #endif /* ZSTD_TRACE */ -#if defined (__cplusplus) -} -#endif - #endif /* ZSTD_TRACE_H */ diff --git a/lib/compress/clevels.h b/lib/compress/clevels.h index 7ed2e00490b..c18da465f32 100644 --- a/lib/compress/clevels.h +++ b/lib/compress/clevels.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/fse_compress.c b/lib/compress/fse_compress.c index 21be8c54fef..1ce3cf16ac1 100644 --- a/lib/compress/fse_compress.c +++ b/lib/compress/fse_compress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * FSE : Finite State Entropy encoder - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates.
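The ZSTD_HAVE_WEAK_SYMBOLS machinery relies on an ELF property: an undefined weak symbol resolves to NULL, so the library can probe at run time whether the application supplied a tracing hook. A minimal sketch of the pattern, assuming GCC or Clang on an ELF platform:

    #include <stdio.h>

    /* weak declaration: the program links even if nobody defines it */
    __attribute__((weak)) void trace_event(const char* name);

    int main(void)
    {
        /* undefined weak symbols compare equal to NULL under ELF */
        if (trace_event)
            trace_event("compress_begin");
        else
            fprintf(stderr, "tracing not linked in\n");
        return 0;
    }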
* * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -25,7 +25,7 @@ #include "../common/error_private.h" #define ZSTD_DEPS_NEED_MALLOC #define ZSTD_DEPS_NEED_MATH64 -#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */ +#include "../common/zstd_deps.h" /* ZSTD_memset */ #include "../common/bits.h" /* ZSTD_highbit32 */ @@ -91,7 +91,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, assert(tableLog < 16); /* required for threshold strategy to work */ /* For explanations on how to distribute symbol values over the table : - * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ + * https://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ #ifdef __clang_analyzer__ ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */ @@ -225,8 +225,8 @@ size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog) size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog + 4 /* bitCount initialized at 4 */ + 2 /* first two symbols may use one additional bit each */) / 8) - + 1 /* round up to whole nb bytes */ - + 2 /* additional two bytes for bitstream flush */; + + 1 /* round up to whole nb bytes */ + + 2 /* additional two bytes for bitstream flush */; return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */ } @@ -255,7 +255,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize, /* Init */ remaining = tableSize+1; /* +1 for extra accuracy */ threshold = tableSize; - nbBits = tableLog+1; + nbBits = (int)tableLog+1; while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */ if (previousIs0) { @@ -274,7 +274,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize, } while (symbol >= start+3) { start+=3; - bitStream += 3 << bitCount; + bitStream += 3U << bitCount; bitCount += 2; } bitStream += (symbol-start) << bitCount; @@ -294,7 +294,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize, count++; /* +1 for extra accuracy */ if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) 
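FSE_writeNCount_generic packs variable-width counts into a 32-bit accumulator and spills 16 bits at a time, little-endian. A simplified, self-contained model of that flush discipline (each field width must be <= 16 bits, values are assumed pre-masked to their width, and `out` needs 2 bytes of slack like the real encoder):

    #include <stdint.h>
    #include <stddef.h>

    static size_t pack_fields(uint8_t* out,
                              const uint32_t* vals, const int* widths, size_t n)
    {
        uint32_t bitStream = 0;
        int bitCount = 0;
        uint8_t* op = out;
        size_t i;
        for (i = 0; i < n; i++) {
            bitStream += vals[i] << bitCount;   /* append field at current bit offset */
            bitCount  += widths[i];
            if (bitCount > 16) {                /* spill the low 16 bits */
                op[0] = (uint8_t)bitStream;
                op[1] = (uint8_t)(bitStream >> 8);
                op += 2; bitStream >>= 16; bitCount -= 16;
            }
        }
        op[0] = (uint8_t)bitStream;             /* final flush, as in FSE_writeNCount */
        op[1] = (uint8_t)(bitStream >> 8);
        op += (bitCount + 7) / 8;
        return (size_t)(op - out);
    }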
[threshold+max 2*threshold[ */ - bitStream += count << bitCount; + bitStream += (U32)count << bitCount; bitCount += nbBits; bitCount -= (count>8); out+= (bitCount+7) /8; - return (out-ostart); + assert(out >= ostart); + return (size_t)(out-ostart); } @@ -343,16 +344,6 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize, * FSE Compression Code ****************************************************************/ -FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) -{ - size_t size; - if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; - size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); - return (FSE_CTable*)ZSTD_malloc(size); -} - -void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); } - /* provides the minimum logSize to safely represent a distribution */ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) { @@ -533,40 +524,6 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, return tableLog; } - -/* fake FSE_CTable, for raw (uncompressed) input */ -size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits) -{ - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSymbolValue = tableMask; - void* const ptr = ct; - U16* const tableU16 = ( (U16*) ptr) + 2; - void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */ - FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* header */ - tableU16[-2] = (U16) nbBits; - tableU16[-1] = (U16) maxSymbolValue; - - /* Build table */ - for (s=0; s not compressible */ - if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ - } - - tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); - CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) ); - - /* Write table description header */ - { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); - op += nc_err; - } - - /* Compress */ - CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); - { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); - if (cSize == 0) return 0; /* not enough space for compressed data */ - op += cSize; - } - - /* check compressibility */ - if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; - - return op-ostart; -} - -typedef struct { - FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; - union { - U32 hist_wksp[HIST_WKSP_SIZE_U32]; - BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; - } workspace; -} fseWkspMax_t; - -size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) -{ - fseWkspMax_t scratchBuffer; - DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ - if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); - return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); -} - -size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - return FSE_compress2(dst, dstCapacity, src, srcSize, 
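FSE_minTableLog, used by the sizing logic above, bounds the table log from two directions: the alphabet needs enough states, and the source size caps how much table is worth paying for. A portable paraphrase of the upstream heuristic (srcSize > 1 assumed):

    #include <stdint.h>
    #include <stddef.h>

    /* floor(log2(v)) for v > 0 */
    static unsigned highbit32(uint32_t v) { unsigned r = 0; while (v >>= 1) r++; return r; }

    static unsigned min_table_log(size_t srcSize, unsigned maxSymbolValue)
    {
        unsigned const minBitsSrc     = highbit32((uint32_t)(srcSize - 1)) + 1;
        unsigned const minBitsSymbols = highbit32(maxSymbolValue) + 2;
        return minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
    }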
FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); -} -#endif - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/compress/hist.c b/lib/compress/hist.c index 073c57e7527..3692bc250ca 100644 --- a/lib/compress/hist.c +++ b/lib/compress/hist.c @@ -1,7 +1,7 @@ /* ****************************************************************** * hist : Histogram functions * part of Finite State Entropy project - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -19,6 +19,12 @@ #include "../common/error_private.h" /* ERROR */ #include "hist.h" +#if defined(ZSTD_ARCH_ARM_SVE2) +#define HIST_FAST_THRESHOLD 500 +#else +#define HIST_FAST_THRESHOLD 1500 +#endif + /* --- Error management --- */ unsigned HIST_isError(size_t code) { return ERR_isError(code); } @@ -26,6 +32,16 @@ unsigned HIST_isError(size_t code) { return ERR_isError(code); } /*-************************************************************** * Histogram functions ****************************************************************/ +void HIST_add(unsigned* count, const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* const end = ip + srcSize; + + while (ip < end) count[*ip++]++; +} + if (maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall); + *maxSymbolValuePtr = maxSymbolValue; + + /* If the buffer size is not divisible by 16, the last elements of the final + * vector register read will be zeros, and these elements must be subtracted + * from the histogram. + */ + hh0 = svsub_n_u16_m(svptrue_pat_b32(SV_VL1), hh0, -sourceSize & 15); + + svst1_u32(svwhilelt_b32_u64( 0, maxCount), count + 0, svshllb_n_u32(hh0, 0)); + svst1_u32(svwhilelt_b32_u64( 4, maxCount), count + 4, svshllt_n_u32(hh0, 0)); + svst1_u32(svwhilelt_b32_u64( 8, maxCount), count + 8, svshllb_n_u32(hh1, 0)); + svst1_u32(svwhilelt_b32_u64(12, maxCount), count + 12, svshllt_n_u32(hh1, 0)); + svst1_u32(svwhilelt_b32_u64(16, maxCount), count + 16, svshllb_n_u32(hh2, 0)); + svst1_u32(svwhilelt_b32_u64(20, maxCount), count + 20, svshllt_n_u32(hh2, 0)); + svst1_u32(svwhilelt_b32_u64(24, maxCount), count + 24, svshllb_n_u32(hh3, 0)); + svst1_u32(svwhilelt_b32_u64(28, maxCount), count + 28, svshllt_n_u32(hh3, 0)); + svst1_u32(svwhilelt_b32_u64(32, maxCount), count + 32, svshllb_n_u32(hh4, 0)); + svst1_u32(svwhilelt_b32_u64(36, maxCount), count + 36, svshllt_n_u32(hh4, 0)); + svst1_u32(svwhilelt_b32_u64(40, maxCount), count + 40, svshllb_n_u32(hh5, 0)); + svst1_u32(svwhilelt_b32_u64(44, maxCount), count + 44, svshllt_n_u32(hh5, 0)); + svst1_u32(svwhilelt_b32_u64(48, maxCount), count + 48, svshllb_n_u32(hh6, 0)); + svst1_u32(svwhilelt_b32_u64(52, maxCount), count + 52, svshllt_n_u32(hh6, 0)); + svst1_u32(svwhilelt_b32_u64(56, maxCount), count + 56, svshllb_n_u32(hh7, 0)); + svst1_u32(svwhilelt_b32_u64(60, maxCount), count + 60, svshllt_n_u32(hh7, 0)); + + hh0 = svmax_u16_x(vl128, hh0, hh1); + hh2 = svmax_u16_x(vl128, hh2, hh3); + hh4 = svmax_u16_x(vl128, hh4, hh5); + hh6 = svmax_u16_x(vl128, hh6, hh7); + hh0 = svmax_u16_x(vl128, hh0, hh2); + hh4 = svmax_u16_x(vl128, hh4, hh6); + max = svmax_u16_x(vl128, hh0, hh4); + + maxSymbolValue = min_size(maxSymbolValue, maxCount); + if (maxSymbolValue >= 64) { + const svuint8_t c4 = svadd_n_u8_x(vl128, c0, 64); + const svuint8_t c5 = svadd_n_u8_x(vl128, c1, 64); + const svuint8_t c6 = svadd_n_u8_x(vl128, c2, 64); + const svuint8_t c7 = svadd_n_u8_x(vl128, c3, 64); + const svuint8_t c8 =
svadd_n_u8_x(vl128, c0, 128); + const svuint8_t c9 = svadd_n_u8_x(vl128, c1, 128); + + max = HIST_count_6_sve2(ip, sourceSize, count + 64, c4, c5, c6, c7, + c8, c9, max, maxCount - 64); + + if (maxSymbolValue >= 160) { + const svuint8_t ca = svadd_n_u8_x(vl128, c2, 128); + const svuint8_t cb = svadd_n_u8_x(vl128, c3, 128); + const svuint8_t cc = svadd_n_u8_x(vl128, c4, 128); + const svuint8_t cd = svadd_n_u8_x(vl128, c5, 128); + const svuint8_t ce = svadd_n_u8_x(vl128, c6, 128); + const svuint8_t cf = svadd_n_u8_x(vl128, c7, 128); + + max = HIST_count_6_sve2(ip, sourceSize, count + 160, ca, cb, cc, + cd, ce, cf, max, maxCount - 160); + } else if (maxCount > 160) { + ZSTD_memset(count + 160, 0, (maxCount - 160) * sizeof(*count)); + } + } else if (maxCount > 64) { + ZSTD_memset(count + 64, 0, (maxCount - 64) * sizeof(*count)); + } + + return svmaxv_u16(vl128, max); + } +} +#endif + /* HIST_count_parallel_wksp() : * store histogram into 4 intermediate tables, recombined at the end. * this design makes better use of OoO cpus, @@ -63,8 +317,8 @@ typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e; * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32. * @return : largest histogram frequency, * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */ -static size_t HIST_count_parallel_wksp( - unsigned* count, unsigned* maxSymbolValuePtr, +static UNUSED_ATTR +size_t HIST_count_parallel_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, HIST_checkInput_e check, U32* const workSpace) @@ -141,11 +395,17 @@ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, void* workSpace, size_t workSpaceSize) { - if (sourceSize < 1500) /* heuristic threshold */ + if (sourceSize < HIST_FAST_THRESHOLD) /* heuristic threshold */ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); +#if defined(ZSTD_ARCH_ARM_SVE2) + (void)workSpace; + (void)workSpaceSize; + return HIST_count_sve2(count, maxSymbolValuePtr, source, sourceSize, trustInput); +#else if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace); +#endif } /* HIST_count_wksp() : @@ -155,10 +415,15 @@ size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, void* workSpace, size_t workSpaceSize) { +#if defined(ZSTD_ARCH_ARM_SVE2) + if (*maxSymbolValuePtr < 255) + return HIST_count_sve2(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue); +#else if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); if (*maxSymbolValuePtr < 255) return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace); +#endif *maxSymbolValuePtr = 255; return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize); } diff --git a/lib/compress/hist.h b/lib/compress/hist.h index 228ed48a71d..e526e9532a7 100644 --- a/lib/compress/hist.h +++ b/lib/compress/hist.h @@ -1,7 +1,7 @@ /* ****************************************************************** * hist : Histogram functions * part of Finite State Entropy project - * Copyright (c) Yann Collet, Facebook, 
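The HIST_count_parallel_wksp comment above spells out the trick: four independent count tables keep consecutive bytes from serializing on the same counter, which out-of-order CPUs exploit. A scalar sketch of the same idea (no overflow or maxSymbolValue handling):

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    static void hist4(unsigned count[256], const uint8_t* src, size_t srcSize)
    {
        unsigned c[4][256];
        size_t i;
        memset(c, 0, sizeof(c));
        for (i = 0; i + 4 <= srcSize; i += 4) {
            c[0][src[i+0]]++;   /* 4 disjoint tables: no same-counter dependency chain */
            c[1][src[i+1]]++;
            c[2][src[i+2]]++;
            c[3][src[i+3]]++;
        }
        for (; i < srcSize; i++) c[0][src[i]]++;
        for (i = 0; i < 256; i++)
            count[i] = c[0][i] + c[1][i] + c[2][i] + c[3][i];
    }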
Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -35,7 +35,11 @@ unsigned HIST_isError(size_t code); /**< tells if a return value is an error co /* --- advanced histogram functions --- */ +#if defined(__ARM_FEATURE_SVE2) +#define HIST_WKSP_SIZE_U32 0 +#else #define HIST_WKSP_SIZE_U32 1024 +#endif #define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned)) /** HIST_count_wksp() : * Same as HIST_count(), but using an externally provided scratch buffer. @@ -73,3 +77,10 @@ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, */ unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + +/*! HIST_add() : + * Lowest level: just add nb of occurrences of characters from @src into @count. + * @count is not reset. @count array is presumed large enough (i.e. 1 KB). + @ This function does not need any additional stack memory. + */ +void HIST_add(unsigned* count, const void* src, size_t srcSize); diff --git a/lib/compress/huf_compress.c b/lib/compress/huf_compress.c index 5d90c162e73..6f2194e7934 100644 --- a/lib/compress/huf_compress.c +++ b/lib/compress/huf_compress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * Huffman encoder, part of New Generation Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -29,7 +29,6 @@ #include "hist.h" #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ #include "../common/fse.h" /* header compression */ -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" #include "../common/bits.h" /* ZSTD_highbit32 */ @@ -221,6 +220,25 @@ static void HUF_setValue(HUF_CElt* elt, size_t value) } } +HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable) +{ + HUF_CTableHeader header; + ZSTD_memcpy(&header, ctable, sizeof(header)); + return header; +} + +static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue) +{ + HUF_CTableHeader header; + HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header)); + ZSTD_memset(&header, 0, sizeof(header)); + assert(tableLog < 256); + header.tableLog = (BYTE)tableLog; + assert(maxSymbolValue < 256); + header.maxSymbolValue = (BYTE)maxSymbolValue; + ZSTD_memcpy(ctable, &header, sizeof(header)); +} + typedef struct { HUF_CompressWeightsWksp wksp; BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ @@ -236,6 +254,11 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, U32 n; HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); + HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp)); + + assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue); + assert(HUF_readCTableHeader(CTable).tableLog == huffLog); + /* check conditions */ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); @@ -265,16 +288,6 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, return ((maxSymbolValue+1)/2) + 1; } -/*! HUF_writeCTable() : - `CTable` : Huffman tree to save, using huf representation. 
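HUF_writeCTableHeader/HUF_readCTableHeader above stash tableLog and maxSymbolValue inside element 0 of the CTable, using memcpy for the type punning so the trick stays strict-aliasing clean. A standalone sketch with stand-in types (the real HUF_CElt and HUF_CTableHeader layouts differ in detail):

    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    typedef uint64_t CElt;   /* stand-in for HUF_CElt */
    typedef struct { uint8_t tableLog; uint8_t maxSymbolValue; uint8_t unused[6]; } CTableHeader;

    static void write_header(CElt* ctable, unsigned tableLog, unsigned maxSymbolValue)
    {
        CTableHeader h;
        assert(sizeof(h) == sizeof(ctable[0]));
        memset(&h, 0, sizeof(h));
        h.tableLog = (uint8_t)tableLog;
        h.maxSymbolValue = (uint8_t)maxSymbolValue;
        memcpy(ctable, &h, sizeof(h));   /* header lives in slot 0, symbols start at slot 1 */
    }

    static CTableHeader read_header(const CElt* ctable)
    {
        CTableHeader h;
        memcpy(&h, ctable, sizeof(h));
        return h;
    }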
- @return : size of saved CTable */ -size_t HUF_writeCTable (void* dst, size_t maxDstSize, - const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog) -{ - HUF_WriteCTableWksp wksp; - return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp)); -} - size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights) { @@ -292,7 +305,9 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); - CTable[0] = tableLog; + *maxSymbolValuePtr = nbSymbols - 1; + + HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr); /* Prepare base value per rank */ { U32 n, nextRankStart = 0; @@ -324,7 +339,6 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void { U32 n; for (n=0; n HUF_readCTableHeader(CTable).maxSymbolValue) + return 0; return (U32)HUF_getNbBits(ct[symbolValue]); } @@ -386,7 +402,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits /* renorm totalCost from 2^largestBits to 2^targetNbBits * note : totalCost is necessarily a multiple of baseCost */ - assert((totalCost & (baseCost - 1)) == 0); + assert(((U32)totalCost & (baseCost - 1)) == 0); totalCost >>= (largestBits - targetNbBits); assert(totalCost > 0); @@ -409,6 +425,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits * gain back half the rank. */ U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1; + assert(nBitsToDecrease <= HUF_TABLELOG_MAX+1); for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { U32 const highPos = rankLast[nBitsToDecrease]; U32 const lowPos = rankLast[nBitsToDecrease-1]; @@ -486,7 +503,7 @@ typedef struct { U16 curr; } rankPos; -typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; +typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)]; /* Number of buckets available for HUF_sort() */ #define RANK_POSITION_TABLE_SIZE 192 @@ -505,8 +522,8 @@ typedef struct { * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing. */ #define RANK_POSITION_MAX_COUNT_LOG 32 -#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */ -#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */ +#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) +#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) /* Return the appropriate bucket index for a given count. See definition of * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. 
@@ -732,7 +749,8 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */ for (n=0; n 11) @@ -1112,9 +1139,9 @@ HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, const int bmi2) + const HUF_CElt* CTable, const int flags) { - if (bmi2) { + if (flags & HUF_flags_bmi2) { return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); } return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); @@ -1125,28 +1152,23 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, const int bmi2) + const HUF_CElt* CTable, const int flags) { - (void)bmi2; + (void)flags; return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } #endif -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) { - return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); -} - -size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) -{ - return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, int bmi2) + const HUF_CElt* CTable, int flags) { size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ const BYTE* ip = (const BYTE*) src; @@ -1160,7 +1182,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, op += 6; /* jumpTable */ assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; @@ -1168,7 +1190,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; @@ -1176,7 +1198,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; @@ -1185,7 +1207,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); 
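The reworked RANK_POSITION macros implement HUF_sort()'s hybrid radix: counts below the cutoff each get an exact bucket, while larger counts share log2 buckets. Buckets near the boundary may alias, which is tolerable because ordering is finished within each bucket afterwards. A sketch using the constants quoted in the diff comments:

    #include <stdint.h>

    #define LOG_BUCKETS_BEGIN 158   /* RANK_POSITION_LOG_BUCKETS_BEGIN */
    #define DISTINCT_CUTOFF   166   /* RANK_POSITION_DISTINCT_COUNT_CUTOFF */

    static unsigned highbit32(uint32_t v) { unsigned r = 0; while (v >>= 1) r++; return r; }

    static uint32_t bucket_index(uint32_t count)
    {
        return (count < DISTINCT_CUTOFF)
             ? count                                  /* exact radix bucket */
             : highbit32(count) + LOG_BUCKETS_BEGIN;  /* log2 bucket */
    }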
assert(ip <= iend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; op += cSize; } @@ -1193,14 +1215,9 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, return (size_t)(op-ostart); } -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) -{ - return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); -} - -size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) { - return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; @@ -1208,11 +1225,11 @@ typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, - HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) + HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags) { size_t const cSize = (nbStreams==HUF_singleStream) ? - HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) : - HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2); + HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) : + HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags); if (HUF_isError(cSize)) { return cSize; } if (cSize==0) { return 0; } /* uncompressible */ op += cSize; @@ -1222,15 +1239,6 @@ static size_t HUF_compressCTable_internal( return (size_t)(op-ostart); } - -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) -{ - unsigned tableLog = FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); - assert(tableLog <= HUF_TABLELOG_MAX); - - return tableLog; -} - typedef struct { unsigned count[HUF_SYMBOLVALUE_MAX + 1]; HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; @@ -1244,6 +1252,81 @@ typedef struct { #define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 #define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ +unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue) +{ + unsigned cardinality = 0; + unsigned i; + + for (i = 0; i < maxSymbolValue + 1; i++) { + if (count[i] != 0) cardinality += 1; + } + + return cardinality; +} + +unsigned HUF_minTableLog(unsigned symbolCardinality) +{ + U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; + return minBitsSymbols; +} + +unsigned HUF_optimalTableLog( + unsigned maxTableLog, + size_t srcSize, + unsigned maxSymbolValue, + void* workSpace, size_t wkspSize, + HUF_CElt* table, + const unsigned* count, + int flags) +{ + assert(srcSize > 1); /* Not supported, RLE should be used instead */ + assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables)); + + if (!(flags & HUF_flags_optimalDepth)) { + /* cheap evaluation, based on FSE */ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); + 
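HUF_compress4X_usingCTable_internal above cuts the input into four segments of (srcSize+3)/4 bytes and prefixes the output with a 6-byte jump table: three little-endian U16 sizes for streams 1 to 3, the fourth size being implied by the block end. A reader-side sketch of that framing:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct { const uint8_t* p; size_t size; } stream_view;

    static int split4(const uint8_t* blk, size_t blkSize, stream_view out[4])
    {
        size_t s1, s2, s3, used;
        if (blkSize < 6) return -1;
        s1 = (size_t)blk[0] | ((size_t)blk[1] << 8);   /* little-endian U16 */
        s2 = (size_t)blk[2] | ((size_t)blk[3] << 8);
        s3 = (size_t)blk[4] | ((size_t)blk[5] << 8);
        used = 6 + s1 + s2 + s3;
        if (used > blkSize) return -1;                 /* corrupted jump table */
        out[0].p = blk + 6;       out[0].size = s1;
        out[1].p = out[0].p + s1; out[1].size = s2;
        out[2].p = out[1].p + s2; out[2].size = s3;
        out[3].p = out[2].p + s3; out[3].size = blkSize - used;  /* implied */
        return 0;
    }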
} + + { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp); + size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp); + size_t hSize, newSize; + const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue); + const unsigned minTableLog = HUF_minTableLog(symbolCardinality); + size_t optSize = ((size_t) ~0) - 1; + unsigned optLog = maxTableLog, optLogGuess; + + DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize); + + /* Search until size increases */ + for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { + DEBUGLOG(7, "checking for huffLog=%u", optLogGuess); + + { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize); + if (ERR_isError(maxBits)) continue; + + if (maxBits < optLogGuess && optLogGuess > minTableLog) break; + + hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize); + } + + if (ERR_isError(hSize)) continue; + + newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; + + if (newSize > optSize + 1) { + break; + } + + if (newSize < optSize) { + optSize = newSize; + optLog = optLogGuess; + } + } + assert(optLog <= HUF_TABLELOG_MAX); + return optLog; + } +} + /* HUF_compress_internal() : * `workSpace_align4` must be aligned on 4-bytes boundaries, * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ @@ -1253,8 +1336,7 @@ HUF_compress_internal (void* dst, size_t dstSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, void* workSpace, size_t wkspSize, - HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, - const int bmi2, unsigned suspectUncompressible) + HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags) { HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t)); BYTE* const ostart = (BYTE*)dst; @@ -1275,15 +1357,15 @@ HUF_compress_internal (void* dst, size_t dstSize, if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; /* Heuristic : If old table is valid, use it for small inputs */ - if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { + if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } /* If uncompressible data is suspected, do a smaller sampling first */ DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); - if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { + if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { size_t largestTotal = 0; DEBUGLOG(5, "input suspected incompressible : sampling to check"); { unsigned maxSymbolValueBegin = maxSymbolValue; @@ -1311,14 +1393,14 @@ HUF_compress_internal (void* dst, size_t dstSize, *repeat = HUF_repeat_none; } /* Heuristic : use existing table for small inputs */ - if (preferRepeat && repeat && *repeat != HUF_repeat_none) { + if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } /* Build Huffman Tree */ - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, 
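The probing loop above walks candidate table logs upward from the minimum, keeps the best total of header plus estimated payload, and bails out once the estimate clearly worsens (the newSize > optSize + 1 test). The same search shape, abstracted over a hypothetical cost callback:

    #include <stddef.h>

    typedef size_t (*cost_fn)(unsigned tableLog, void* ctx);  /* hypothetical */

    static unsigned search_best_log(unsigned minLog, unsigned maxLog,
                                    cost_fn cost, void* ctx)
    {
        size_t bestSize = ((size_t)-1) - 1;   /* leaves room for bestSize+1 below */
        unsigned bestLog = maxLog, guess;
        for (guess = minLog; guess <= maxLog; guess++) {
            size_t const sz = cost(guess, ctx);
            if (sz > bestSize + 1) break;     /* clearly past the optimum */
            if (sz < bestSize) { bestSize = sz; bestLog = guess; }
        }
        return bestLog;
    }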
sizeof(table->wksps), table->CTable, table->count, flags); { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, maxSymbolValue, huffLog, &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); @@ -1326,12 +1408,6 @@ HUF_compress_internal (void* dst, size_t dstSize, huffLog = (U32)maxBits; DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1)); } - /* Zero unused symbols in CTable, so we can check it for validity */ - { - size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue); - size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt); - ZSTD_memset(table->CTable + ctableSize, 0, unusedSize); - } /* Write table description header */ { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog, @@ -1343,7 +1419,7 @@ HUF_compress_internal (void* dst, size_t dstSize, if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } } /* Use the new huffman table */ @@ -1355,96 +1431,35 @@ HUF_compress_internal (void* dst, size_t dstSize, } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, table->CTable, bmi2); -} - - -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_singleStream, - workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/, 0); + nbStreams, table->CTable, flags); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, - int bmi2, unsigned suspectUncompressible) + HUF_CElt* hufTable, HUF_repeat* repeat, int flags) { DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize); return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, - repeat, preferRepeat, bmi2, suspectUncompressible); -} - -/* HUF_compress4X_repeat(): - * compress input using 4 streams. - * provide workspace to generate compression tables */ -size_t HUF_compress4X_wksp (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize) -{ - DEBUGLOG(5, "HUF_compress4X_wksp (srcSize = %zu)", srcSize); - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_fourStreams, - workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/, 0); + repeat, flags); } /* HUF_compress4X_repeat(): * compress input using 4 streams. 
* consider skipping quickly - * re-use an existing huffman compression table */ + * reuse an existing huffman compression table */ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible) + HUF_CElt* hufTable, HUF_repeat* repeat, int flags) { DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize); return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, - hufTable, repeat, preferRepeat, bmi2, suspectUncompressible); -} - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -/** HUF_buildCTable() : - * @return : maxNbBits - * Note : count is used before tree is written, so they can safely overlap - */ -size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits) -{ - HUF_buildCTable_wksp_tables workspace; - return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace)); + hufTable, repeat, flags); } - -size_t HUF_compress1X (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog) -{ - U64 workSpace[HUF_WORKSPACE_SIZE_U64]; - return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); -} - -size_t HUF_compress2 (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog) -{ - U64 workSpace[HUF_WORKSPACE_SIZE_U64]; - return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); -} - -size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) -{ - return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); -} -#endif diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c index 8a0c2f19165..c06f2e1bb98 100644 --- a/lib/compress/zstd_compress.c +++ b/lib/compress/zstd_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,12 +11,13 @@ /*-************************************* * Dependencies ***************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ #include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */ #include "../common/mem.h" +#include "../common/error_private.h" #include "hist.h" /* HIST_countFast_wksp */ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "zstd_compress_internal.h" #include "zstd_compress_sequences.h" @@ -27,7 +28,7 @@ #include "zstd_opt.h" #include "zstd_ldm.h" #include "zstd_compress_superblock.h" -#include "../common/bits.h" /* ZSTD_highbit32 */ +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_rotateRight_U64 */ /* *************************************************************** * Tuning parameters @@ -48,25 +49,36 @@ * in log format, aka 17 => 1 << 17 == 128Ki positions. * This structure is only used in zstd_opt. * Since allocation is centralized for all strategies, it has to be known here. 
- * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3, + * The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3, * so that zstd_opt.c doesn't need to know about this constant. */ #ifndef ZSTD_HASHLOG3_MAX # define ZSTD_HASHLOG3_MAX 17 #endif + +/*-************************************* +* Forward declarations +***************************************/ +size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence* inSeqs, + size_t nbSequences); + + /*-************************************* * Helper functions ***************************************/ /* ZSTD_compressBound() - * Note that the result from this function is only compatible with the "normal" - * full-block strategy. - * When there are a lot of small blocks due to frequent flush in streaming mode - * the overhead of headers can make the compressed data to be larger than the - * return value of ZSTD_compressBound(). + * Note that the result from this function is only valid for + * the one-pass compression functions. + * When employing the streaming mode, + * if flushes are frequently altering the size of blocks, + * the overhead from block headers can make the compressed data larger + * than the return value of ZSTD_compressBound(). */ size_t ZSTD_compressBound(size_t srcSize) { - return ZSTD_COMPRESSBOUND(srcSize); + size_t const r = ZSTD_COMPRESSBOUND(srcSize); + if (r==0) return ERROR(srcSize_wrong); + return r; } @@ -79,12 +91,12 @@ struct ZSTD_CDict_s { ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */ U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */ ZSTD_cwksp workspace; - ZSTD_matchState_t matchState; + ZSTD_MatchState_t matchState; ZSTD_compressedBlockState_t cBlockState; ZSTD_customMem customMem; U32 dictID; int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ - ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use + ZSTD_ParamSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use * row-based matchfinder. Unless the cdict is reloaded, we will use * the same greedy/lazy matchfinder at compression time. */ @@ -134,11 +146,12 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize) ZSTD_cwksp_move(&cctx->workspace, &ws); cctx->staticSize = workspaceSize; - /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */ - if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; + /* statically sized space. 
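As the reworded comment notes, the bound only holds for one-pass compression; that is also its normal use, sizing the destination buffer up front. A minimal one-shot example against the stable public API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    int main(void)
    {
        const char* const src = "example payload example payload example payload";
        size_t const srcSize = strlen(src);
        size_t const bound = ZSTD_compressBound(srcSize);
        void* const dst = malloc(bound);
        if (dst == NULL) return 1;
        {   size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3);
            if (ZSTD_isError(cSize)) {
                fprintf(stderr, "%s\n", ZSTD_getErrorName(cSize));
                free(dst);
                return 1;
            }
            printf("%zu -> %zu bytes\n", srcSize, cSize);
        }
        free(dst);
        return 0;
    }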
tmpWorkspace never moves (but prev/next block swap places) */ + if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); - cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE); + cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE); + cctx->tmpWkspSize = TMP_WORKSPACE_SIZE; cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); return cctx; } @@ -175,6 +188,7 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) { + DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx); if (cctx==NULL) return 0; /* support free on NULL */ RETURN_ERROR_IF(cctx->staticSize, memory_allocation, "not compatible with static CCtx"); @@ -213,7 +227,7 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) } /* private API call, for dictBuilder only */ -const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } +const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } /* Returns true if the strategy supports using a row based matchfinder */ static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { @@ -223,32 +237,31 @@ static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder * for this compression. */ -static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) { +static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_ParamSwitch_e mode) { assert(mode != ZSTD_ps_auto); return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable); } /* Returns row matchfinder usage given an initial mode and cParams */ -static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode, +static ZSTD_ParamSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_ParamSwitch_e mode, const ZSTD_compressionParameters* const cParams) { -#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON) - int const kHasSIMD128 = 1; +#ifdef ZSTD_LINUX_KERNEL + /* The Linux Kernel does not use SIMD, and 128KB is a very common size, e.g. in BtrFS. + * The row match finder is slower for this size without SIMD, so disable it. 
+ */ + const unsigned kWindowLogLowerBound = 17; #else - int const kHasSIMD128 = 0; + const unsigned kWindowLogLowerBound = 14; #endif if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */ mode = ZSTD_ps_disable; if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode; - if (kHasSIMD128) { - if (cParams->windowLog > 14) mode = ZSTD_ps_enable; - } else { - if (cParams->windowLog > 17) mode = ZSTD_ps_enable; - } + if (cParams->windowLog > kWindowLogLowerBound) mode = ZSTD_ps_enable; return mode; } /* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ -static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, +static ZSTD_ParamSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_ParamSwitch_e mode, const ZSTD_compressionParameters* const cParams) { if (mode != ZSTD_ps_auto) return mode; return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable; @@ -256,7 +269,7 @@ static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, - const ZSTD_paramSwitch_e useRowMatchFinder, + const ZSTD_ParamSwitch_e useRowMatchFinder, const U32 forDDSDict) { assert(useRowMatchFinder != ZSTD_ps_auto); /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate. @@ -265,16 +278,38 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder)); } -/* Returns 1 if compression parameters are such that we should +/* Returns ZSTD_ps_enable if compression parameters are such that we should * enable long distance matching (wlog >= 27, strategy >= btopt). - * Returns 0 otherwise. + * Returns ZSTD_ps_disable otherwise. */ -static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, +static ZSTD_ParamSwitch_e ZSTD_resolveEnableLdm(ZSTD_ParamSwitch_e mode, const ZSTD_compressionParameters* const cParams) { if (mode != ZSTD_ps_auto) return mode; return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable; } +static int ZSTD_resolveExternalSequenceValidation(int mode) { + return mode; +} + +/* Resolves maxBlockSize to the default if no value is present. */ +static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) { + if (maxBlockSize == 0) { + return ZSTD_BLOCKSIZE_MAX; + } else { + return maxBlockSize; + } +} + +static ZSTD_ParamSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_ParamSwitch_e value, int cLevel) { + if (value != ZSTD_ps_auto) return value; + if (cLevel < 10) { + return ZSTD_ps_disable; + } else { + return ZSTD_ps_enable; + } +} + /* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. 
*/ static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) { @@ -296,8 +331,12 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); assert(cctxParams.ldmParams.hashRateLog < 32); } - cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); + cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams); cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); + cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); + cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes, + cctxParams.compressionLevel); assert(!ZSTD_checkCParams(cParams)); return cctxParams; } @@ -343,10 +382,13 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) #define ZSTD_NO_CLEVEL 0 /** - * Initializes the cctxParams from params and compressionLevel. + * Initializes `cctxParams` from `params` and `compressionLevel`. * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. */ -static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel) +static void +ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, + const ZSTD_parameters* params, + int compressionLevel) { assert(!ZSTD_checkCParams(params->cParams)); ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); @@ -357,10 +399,13 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par */ cctxParams->compressionLevel = compressionLevel; cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams); - cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams); + cctxParams->postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, &params->cParams); cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams); + cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences); + cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); + cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel); DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d", - cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm); + cctxParams->useRowMatchFinder, cctxParams->postBlockSplitter, cctxParams->ldmParams.enableLdm); } size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) @@ -373,7 +418,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete /** * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. - * @param param Validated zstd parameters. + * @param params Validated zstd parameters.
*/ static void ZSTD_CCtxParams_setZstdParams( ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) @@ -482,8 +527,8 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_c_enableLongDistanceMatching: - bounds.lowerBound = 0; - bounds.upperBound = 1; + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; case ZSTD_c_ldmHashLog: @@ -561,11 +606,16 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) bounds.upperBound = 1; return bounds; - case ZSTD_c_useBlockSplitter: + case ZSTD_c_splitAfterSequences: bounds.lowerBound = (int)ZSTD_ps_auto; bounds.upperBound = (int)ZSTD_ps_disable; return bounds; + case ZSTD_c_blockSplitterLevel: + bounds.lowerBound = 0; + bounds.upperBound = ZSTD_BLOCKSPLITTER_LEVEL_MAX; + return bounds; + case ZSTD_c_useRowMatchFinder: bounds.lowerBound = (int)ZSTD_ps_auto; bounds.upperBound = (int)ZSTD_ps_disable; @@ -581,6 +631,21 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) bounds.upperBound = (int)ZSTD_ps_disable; return bounds; + case ZSTD_c_enableSeqProducerFallback: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_maxBlockSize: + bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; + bounds.upperBound = ZSTD_BLOCKSIZE_MAX; + return bounds; + + case ZSTD_c_repcodeResolution: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + default: bounds.error = ERROR(parameter_unsupported); return bounds; @@ -599,10 +664,11 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) return 0; } -#define BOUNDCHECK(cParam, val) { \ - RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ - parameter_outOfBound, "Param out of bounds"); \ -} +#define BOUNDCHECK(cParam, val) \ + do { \ + RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ + parameter_outOfBound, "Param out of bounds"); \ + } while (0) static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) @@ -616,6 +682,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) case ZSTD_c_minMatch: case ZSTD_c_targetLength: case ZSTD_c_strategy: + case ZSTD_c_blockSplitterLevel: return 1; case ZSTD_c_format: @@ -642,10 +709,13 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) case ZSTD_c_stableOutBuffer: case ZSTD_c_blockDelimiters: case ZSTD_c_validateSequences: - case ZSTD_c_useBlockSplitter: + case ZSTD_c_splitAfterSequences: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: + case ZSTD_c_repcodeResolution: default: return 0; } @@ -658,7 +728,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) if (ZSTD_isUpdateAuthorized(param)) { cctx->cParamsChanged = 1; } else { - RETURN_ERROR(stage_wrong, "can only set params in ctx init stage"); + RETURN_ERROR(stage_wrong, "can only set params in cctx init stage"); } } switch(param) @@ -698,10 +768,14 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) case ZSTD_c_stableOutBuffer: case ZSTD_c_blockDelimiters: case ZSTD_c_validateSequences: - case ZSTD_c_useBlockSplitter: + case ZSTD_c_splitAfterSequences: + case ZSTD_c_blockSplitterLevel: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: + case ZSTD_c_repcodeResolution: break; default: RETURN_ERROR(parameter_unsupported, 
"unknown parameter"); @@ -793,14 +867,14 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, case ZSTD_c_forceAttachDict : { const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; - BOUNDCHECK(ZSTD_c_forceAttachDict, pref); + BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref); CCtxParams->attachDictPref = pref; return CCtxParams->attachDictPref; } case ZSTD_c_literalCompressionMode : { - const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; - BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); + const ZSTD_ParamSwitch_e lcm = (ZSTD_ParamSwitch_e)value; + BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm); CCtxParams->literalCompressionMode = lcm; return CCtxParams->literalCompressionMode; } @@ -812,7 +886,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); CCtxParams->nbWorkers = value; - return CCtxParams->nbWorkers; + return (size_t)(CCtxParams->nbWorkers); #endif case ZSTD_c_jobSize : @@ -825,7 +899,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, value = ZSTDMT_JOBSIZE_MIN; FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); assert(value >= 0); - CCtxParams->jobSize = value; + CCtxParams->jobSize = (size_t)value; return CCtxParams->jobSize; #endif @@ -836,7 +910,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); CCtxParams->overlapLog = value; - return CCtxParams->overlapLog; + return (size_t)CCtxParams->overlapLog; #endif case ZSTD_c_rsyncable : @@ -846,7 +920,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); CCtxParams->rsyncable = value; - return CCtxParams->rsyncable; + return (size_t)CCtxParams->rsyncable; #endif case ZSTD_c_enableDedicatedDictSearch : @@ -854,7 +928,8 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, return (size_t)CCtxParams->enableDedicatedDictSearch; case ZSTD_c_enableLongDistanceMatching : - CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; + BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value); + CCtxParams->ldmParams.enableLdm = (ZSTD_ParamSwitch_e)value; return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : @@ -882,8 +957,10 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, return CCtxParams->ldmParams.hashRateLog; case ZSTD_c_targetCBlockSize : - if (value!=0) /* 0 ==> default */ + if (value!=0) { /* 0 ==> default */ + value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN); BOUNDCHECK(ZSTD_c_targetCBlockSize, value); + } CCtxParams->targetCBlockSize = (U32)value; return CCtxParams->targetCBlockSize; @@ -905,34 +982,56 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, case ZSTD_c_blockDelimiters: BOUNDCHECK(ZSTD_c_blockDelimiters, value); - CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; + CCtxParams->blockDelimiters = (ZSTD_SequenceFormat_e)value; return CCtxParams->blockDelimiters; case ZSTD_c_validateSequences: BOUNDCHECK(ZSTD_c_validateSequences, value); CCtxParams->validateSequences = value; - return CCtxParams->validateSequences; + return (size_t)CCtxParams->validateSequences; - case ZSTD_c_useBlockSplitter: - BOUNDCHECK(ZSTD_c_useBlockSplitter, value); - CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value; - return CCtxParams->useBlockSplitter; + case ZSTD_c_splitAfterSequences: + BOUNDCHECK(ZSTD_c_splitAfterSequences, value); 
+ CCtxParams->postBlockSplitter = (ZSTD_ParamSwitch_e)value; + return CCtxParams->postBlockSplitter; + + case ZSTD_c_blockSplitterLevel: + BOUNDCHECK(ZSTD_c_blockSplitterLevel, value); + CCtxParams->preBlockSplitter_level = value; + return (size_t)CCtxParams->preBlockSplitter_level; case ZSTD_c_useRowMatchFinder: BOUNDCHECK(ZSTD_c_useRowMatchFinder, value); - CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; + CCtxParams->useRowMatchFinder = (ZSTD_ParamSwitch_e)value; return CCtxParams->useRowMatchFinder; case ZSTD_c_deterministicRefPrefix: BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value); CCtxParams->deterministicRefPrefix = !!value; - return CCtxParams->deterministicRefPrefix; + return (size_t)CCtxParams->deterministicRefPrefix; case ZSTD_c_prefetchCDictTables: BOUNDCHECK(ZSTD_c_prefetchCDictTables, value); - CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; + CCtxParams->prefetchCDictTables = (ZSTD_ParamSwitch_e)value; return CCtxParams->prefetchCDictTables; + case ZSTD_c_enableSeqProducerFallback: + BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value); + CCtxParams->enableMatchFinderFallback = value; + return (size_t)CCtxParams->enableMatchFinderFallback; + + case ZSTD_c_maxBlockSize: + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_maxBlockSize, value); + assert(value>=0); + CCtxParams->maxBlockSize = (size_t)value; + return CCtxParams->maxBlockSize; + + case ZSTD_c_repcodeResolution: + BOUNDCHECK(ZSTD_c_repcodeResolution, value); + CCtxParams->searchForExternalRepcodes = (ZSTD_ParamSwitch_e)value; + return CCtxParams->searchForExternalRepcodes; + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } } @@ -948,7 +1047,7 @@ size_t ZSTD_CCtxParams_getParameter( switch(param) { case ZSTD_c_format : - *value = CCtxParams->format; + *value = (int)CCtxParams->format; break; case ZSTD_c_compressionLevel : *value = CCtxParams->compressionLevel; @@ -963,16 +1062,16 @@ size_t ZSTD_CCtxParams_getParameter( *value = (int)CCtxParams->cParams.chainLog; break; case ZSTD_c_searchLog : - *value = CCtxParams->cParams.searchLog; + *value = (int)CCtxParams->cParams.searchLog; break; case ZSTD_c_minMatch : - *value = CCtxParams->cParams.minMatch; + *value = (int)CCtxParams->cParams.minMatch; break; case ZSTD_c_targetLength : - *value = CCtxParams->cParams.targetLength; + *value = (int)CCtxParams->cParams.targetLength; break; case ZSTD_c_strategy : - *value = (unsigned)CCtxParams->cParams.strategy; + *value = (int)CCtxParams->cParams.strategy; break; case ZSTD_c_contentSizeFlag : *value = CCtxParams->fParams.contentSizeFlag; @@ -987,10 +1086,10 @@ size_t ZSTD_CCtxParams_getParameter( *value = CCtxParams->forceWindow; break; case ZSTD_c_forceAttachDict : - *value = CCtxParams->attachDictPref; + *value = (int)CCtxParams->attachDictPref; break; case ZSTD_c_literalCompressionMode : - *value = CCtxParams->literalCompressionMode; + *value = (int)CCtxParams->literalCompressionMode; break; case ZSTD_c_nbWorkers : #ifndef ZSTD_MULTITHREAD @@ -1024,19 +1123,19 @@ size_t ZSTD_CCtxParams_getParameter( *value = CCtxParams->enableDedicatedDictSearch; break; case ZSTD_c_enableLongDistanceMatching : - *value = CCtxParams->ldmParams.enableLdm; + *value = (int)CCtxParams->ldmParams.enableLdm; break; case ZSTD_c_ldmHashLog : - *value = CCtxParams->ldmParams.hashLog; + *value = (int)CCtxParams->ldmParams.hashLog; break; case ZSTD_c_ldmMinMatch : - *value = CCtxParams->ldmParams.minMatchLength; + *value = (int)CCtxParams->ldmParams.minMatchLength; break; case ZSTD_c_ldmBucketSizeLog : - 
*value = CCtxParams->ldmParams.bucketSizeLog; + *value = (int)CCtxParams->ldmParams.bucketSizeLog; break; case ZSTD_c_ldmHashRateLog : - *value = CCtxParams->ldmParams.hashRateLog; + *value = (int)CCtxParams->ldmParams.hashRateLog; break; case ZSTD_c_targetCBlockSize : *value = (int)CCtxParams->targetCBlockSize; @@ -1056,8 +1155,11 @@ size_t ZSTD_CCtxParams_getParameter( case ZSTD_c_validateSequences : *value = (int)CCtxParams->validateSequences; break; - case ZSTD_c_useBlockSplitter : - *value = (int)CCtxParams->useBlockSplitter; + case ZSTD_c_splitAfterSequences : + *value = (int)CCtxParams->postBlockSplitter; + break; + case ZSTD_c_blockSplitterLevel : + *value = CCtxParams->preBlockSplitter_level; break; case ZSTD_c_useRowMatchFinder : *value = (int)CCtxParams->useRowMatchFinder; @@ -1068,6 +1170,15 @@ size_t ZSTD_CCtxParams_getParameter( case ZSTD_c_prefetchCDictTables: *value = (int)CCtxParams->prefetchCDictTables; break; + case ZSTD_c_enableSeqProducerFallback: + *value = CCtxParams->enableMatchFinderFallback; + break; + case ZSTD_c_maxBlockSize: + *value = (int)CCtxParams->maxBlockSize; + break; + case ZSTD_c_repcodeResolution: + *value = (int)CCtxParams->searchForExternalRepcodes; + break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return 0; @@ -1094,9 +1205,47 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams( return 0; } +size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams) +{ + ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */); + DEBUGLOG(4, "ZSTD_CCtx_setCParams"); + /* only update if all parameters are valid */ + FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cparams.windowLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, (int)cparams.chainLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, (int)cparams.hashLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, (int)cparams.searchLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, (int)cparams.minMatch), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, (int)cparams.targetLength), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, (int)cparams.strategy), ""); + return 0; +} + +size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams) +{ + ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */); + DEBUGLOG(4, "ZSTD_CCtx_setFParams"); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), ""); + return 0; +} + +size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params) +{ + DEBUGLOG(4, "ZSTD_CCtx_setParams"); + /* First check cParams, because we want to update all or none. */ + FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); + /* Next set fParams, because this could fail if the cctx isn't in init stage. */ + FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), ""); + /* Finally set cParams, which should succeed. 
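ZSTD_CCtx_setParams orders its steps as a small transaction: validate the cParams first (so the update is all-or-nothing), then apply the fParams step that can fail on context state, and only then the cParams step that should no longer fail. From the caller's side the bundle is one call; a usage sketch, where ZSTD_getParams is the existing experimental helper that derives a full ZSTD_parameters from a level (requires ZSTD_STATIC_LINKING_ONLY):

    /* Apply a complete parameter set to a context in one shot.
     * Returns 0 on success, or an error code (test with ZSTD_isError). */
    static size_t apply_params(ZSTD_CCtx* cctx, int level, unsigned long long srcSizeHint)
    {
        ZSTD_parameters const params = ZSTD_getParams(level, srcSizeHint, /* dictSize */ 0);
        return ZSTD_CCtx_setParams(cctx, params);
    }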
*/ + FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), ""); + return 0; +} + size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { - DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); + DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize); RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't set pledgedSrcSize when not in init stage."); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; @@ -1112,9 +1261,9 @@ static void ZSTD_dedicatedDictSearch_revertCParams( ZSTD_compressionParameters* cParams); /** - * Initializes the local dict using the requested parameters. - * NOTE: This does not use the pledged src size, because it may be used for more - * than one compression. + * Initializes the local dictionary using requested parameters. + * NOTE: Initialization does not employ the pledged src size, + * because the dictionary may be used for multiple compressions. */ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) { @@ -1127,8 +1276,8 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) return 0; } if (dl->cdict != NULL) { - assert(cctx->cdict == dl->cdict); /* Local dictionary already initialized. */ + assert(cctx->cdict == dl->cdict); return 0; } assert(dl->dictSize > 0); @@ -1148,26 +1297,30 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) } size_t ZSTD_CCtx_loadDictionary_advanced( - ZSTD_CCtx* cctx, const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType) { - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't load a dictionary when ctx is not in init stage."); DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); - ZSTD_clearAllDicts(cctx); /* in case one already exists */ - if (dict == NULL || dictSize == 0) /* no dictionary mode */ + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't load a dictionary when cctx is not in init stage."); + ZSTD_clearAllDicts(cctx); /* erase any previously set dictionary */ + if (dict == NULL || dictSize == 0) /* no dictionary */ return 0; if (dictLoadMethod == ZSTD_dlm_byRef) { cctx->localDict.dict = dict; } else { + /* copy dictionary content inside CCtx to own its lifetime */ void* dictBuffer; RETURN_ERROR_IF(cctx->staticSize, memory_allocation, - "no malloc for static CCtx"); + "static CCtx can't allocate for an internal copy of dictionary"); dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); - RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!"); + RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation, + "allocation failed for dictionary content"); ZSTD_memcpy(dictBuffer, dict, dictSize); - cctx->localDict.dictBuffer = dictBuffer; - cctx->localDict.dict = dictBuffer; + cctx->localDict.dictBuffer = dictBuffer; /* owned ptr to free */ + cctx->localDict.dict = dictBuffer; /* read-only reference */ } cctx->localDict.dictSize = dictSize; cctx->localDict.dictContentType = dictContentType; @@ -1237,7 +1390,7 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset) if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't reset parameters only when not in init stage."); + "Reset parameters is only possible during init stage."); ZSTD_clearAllDicts(cctx); return 
ZSTD_CCtxParams_reset(&cctx->requestedParams); } @@ -1256,7 +1409,7 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog); BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch); BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength); - BOUNDCHECK(ZSTD_c_strategy, cParams.strategy); + BOUNDCHECK(ZSTD_c_strategy, (int)cParams.strategy); return 0; } @@ -1266,11 +1419,12 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) { -# define CLAMP_TYPE(cParam, val, type) { \ - ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \ - if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \ - else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \ - } +# define CLAMP_TYPE(cParam, val, type) \ + do { \ + ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \ + if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \ + else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \ + } while (0) # define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) CLAMP(ZSTD_c_windowLog, cParams.windowLog); CLAMP(ZSTD_c_chainLog, cParams.chainLog); @@ -1328,19 +1482,62 @@ static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize) * optimize `cPar` for a specified input (`srcSize` and `dictSize`). * mostly downsize to reduce memory consumption and initialization latency. * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. - * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`. + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`. * note : `srcSize==0` means 0! * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize, - ZSTD_cParamMode_e mode) + ZSTD_CParamMode_e mode, + ZSTD_ParamSwitch_e useRowMatchFinder) { const U64 minSrcSize = 513; /* (1<<9) + 1 */ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); assert(ZSTD_checkCParams(cPar)==0); + /* Cascade the selected strategy down to the next-highest one built into + * this binary.
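Both macro rewrites in this diff (BOUNDCHECK earlier, CLAMP_TYPE just above) apply the standard do { ... } while (0) hardening: a brace-only statement macro breaks when used as the sole statement before an else. A generic C illustration, not zstd code:

    #define CHECK_BRACES(x)  { if (!(x)) return -1; }
    #define CHECK_DOWHILE(x) do { if (!(x)) return -1; } while (0)

    static int demo(int cond, int v)
    {
        /* if (cond) CHECK_BRACES(v); else v++;
         *   ^ does not compile: the ';' after the '}' orphans the 'else'. */
        if (cond) CHECK_DOWHILE(v); else v++;  /* parses as one statement */
        return v;
    }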
*/ +#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_btultra2) { + cPar.strategy = ZSTD_btultra; + } + if (cPar.strategy == ZSTD_btultra) { + cPar.strategy = ZSTD_btopt; + } +#endif +#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_btopt) { + cPar.strategy = ZSTD_btlazy2; + } +#endif +#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_btlazy2) { + cPar.strategy = ZSTD_lazy2; + } +#endif +#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_lazy2) { + cPar.strategy = ZSTD_lazy; + } +#endif +#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_lazy) { + cPar.strategy = ZSTD_greedy; + } +#endif +#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_greedy) { + cPar.strategy = ZSTD_dfast; + } +#endif +#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR + if (cPar.strategy == ZSTD_dfast) { + cPar.strategy = ZSTD_fast; + cPar.targetLength = 0; + } +#endif + switch (mode) { case ZSTD_cpm_unknown: case ZSTD_cpm_noAttachDict: @@ -1369,8 +1566,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, } /* resize windowLog if input is small enough, to use less memory */ - if ( (srcSize < maxWindowResize) - && (dictSize < maxWindowResize) ) { + if ( (srcSize <= maxWindowResize) + && (dictSize <= maxWindowResize) ) { U32 const tSize = (U32)(srcSize + dictSize); static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN : @@ -1388,11 +1585,40 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ + /* We can't use more than 32 bits of hash in total, so that means that we require: + * (hashLog + 8) <= 32 && (chainLog + 8) <= 32 + */ if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) { U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS; if (cPar.hashLog > maxShortCacheHashLog) { cPar.hashLog = maxShortCacheHashLog; } + if (cPar.chainLog > maxShortCacheHashLog) { + cPar.chainLog = maxShortCacheHashLog; + } + } + + + /* At this point, we aren't 100% sure if we are using the row match finder. + * Unless it is explicitly disabled, conservatively assume that it is enabled. + * In this case it will only be disabled for small sources, so shrinking the + * hash log a little bit shouldn't result in any ratio loss. + */ + if (useRowMatchFinder == ZSTD_ps_auto) + useRowMatchFinder = ZSTD_ps_enable; + + /* We can't hash more than 32-bits in total. 
So that means that we require: + * (hashLog - rowLog + 8) <= 32 + */ + if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) { + /* Switch to 32-entry rows if searchLog is 5 (or more) */ + U32 const rowLog = BOUNDED(4, cPar.searchLog, 6); + U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS; + U32 const maxHashLog = maxRowHashLog + rowLog; + assert(cPar.hashLog >= rowLog); + if (cPar.hashLog > maxHashLog) { + cPar.hashLog = maxHashLog; + } } return cPar; @@ -1405,11 +1631,11 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar, { cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; - return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown); + return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto); } -static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); -static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); static void ZSTD_overrideCParams( ZSTD_compressionParameters* cParams, @@ -1425,24 +1651,25 @@ static void ZSTD_overrideCParams( } ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( - const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) + const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) { ZSTD_compressionParameters cParams; if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) { - srcSizeHint = CCtxParams->srcSizeHint; + assert(CCtxParams->srcSizeHint>=0); + srcSizeHint = (U64)CCtxParams->srcSizeHint; } cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode); if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); assert(!ZSTD_checkCParams(cParams)); /* srcSizeHint == 0 means 0 */ - return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode); + return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder); } static size_t ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, - const ZSTD_paramSwitch_e useRowMatchFinder, - const U32 enableDedicatedDictSearch, + const ZSTD_ParamSwitch_e useRowMatchFinder, + const int enableDedicatedDictSearch, const U32 forCCtx) { /* chain table size should be 0 for fast or row-hash strategies */ @@ -1458,14 +1685,14 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, + hSize * sizeof(U32) + h3Size * sizeof(U32); size_t const optPotentialSpace = - ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32)) - + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32)) - + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32)) - + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32)) - + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)) - + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); + ZSTD_cwksp_aligned64_alloc_size((MaxML+1) * sizeof(U32)) + + ZSTD_cwksp_aligned64_alloc_size((MaxLL+1) * sizeof(U32)) + + ZSTD_cwksp_aligned64_alloc_size((MaxOff+1) * sizeof(U32)) + + ZSTD_cwksp_aligned64_alloc_size((1<<Litbits) * sizeof(U32)) + + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t)) + + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t)); size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) - ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16)) + ? ZSTD_cwksp_aligned64_alloc_size(hSize) : 0; size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt)) ?
optPotentialSpace @@ -1481,30 +1708,38 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; } +/* Helper function for calculating memory requirements. + * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */ +static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) { + U32 const divider = (minMatch==3 || useSequenceProducer) ? 3 : 4; + return blockSize / divider; +} + static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( const ZSTD_compressionParameters* cParams, const ldmParams_t* ldmParams, const int isStatic, - const ZSTD_paramSwitch_e useRowMatchFinder, + const ZSTD_ParamSwitch_e useRowMatchFinder, const size_t buffInSize, const size_t buffOutSize, - const U64 pledgedSrcSize) + const U64 pledgedSrcSize, + int useSequenceProducer, + size_t maxBlockSize) { size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); - U32 const divider = (cParams->minMatch==3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; + size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize); + size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) - + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef)) + + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * sizeof(SeqDef)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); - size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE); + size_t const tmpWorkSpace = ZSTD_cwksp_alloc_size(TMP_WORKSPACE_SIZE); size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t)); size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1); size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ? - ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; + ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) @@ -1512,15 +1747,21 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + size_t const externalSeqSpace = useSequenceProducer + ? 
ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence)) + : 0; + size_t const neededSpace = cctxSpace + - entropySpace + + tmpWorkSpace + blockStateSpace + ldmSpace + ldmSeqSpace + matchStateSize + tokenSpace + - bufferSpace; + bufferSpace + + externalSeqSpace; DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); return neededSpace; @@ -1530,15 +1771,19 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, + ldmParams_t ldmParams = params->ldmParams; + ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &cParams); RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); + if (ldmParams.enableLdm == ZSTD_ps_enable) { + ZSTD_ldm_adjustParameters(&ldmParams, &cParams); + } /* estimateCCtxSize is for one-shot compression. So no buffers should * be needed. However, we still allocate two 0-sized buffers, which can * take space under ASAN. */ return ZSTD_estimateCCtxSize_usingCCtxParams_internal( - &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN); + &cParams, &ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize); } size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) @@ -1588,18 +1833,22 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); + ldmParams_t ldmParams = params->ldmParams; + size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog); size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered) ? ((size_t)1 << cParams.windowLog) + blockSize : 0; size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered) ? ZSTD_compressBound(blockSize) + 1 : 0; - ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams); + ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams); + if (ldmParams.enableLdm == ZSTD_ps_enable) { + ZSTD_ldm_adjustParameters(&ldmParams, &cParams); + } return ZSTD_estimateCCtxSize_usingCCtxParams_internal( - &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, - ZSTD_CONTENTSIZE_UNKNOWN); + &cParams, &ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, + ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize); } } @@ -1705,7 +1954,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) * Invalidate all the matches in the match finder tables. * Requires nextSrc and base to be set (can be NULL).
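With LDM adjustment, the external-sequence buffer, and maxBlockSize now folded into the estimate, sizing a workspace from user code is unchanged: build a ZSTD_CCtx_params, then ask for the estimate. A usage sketch against the public experimental API (requires ZSTD_STATIC_LINKING_ONLY):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Workspace bytes needed for single-threaded, one-shot compression
     * at `level` with an explicit window; an error code on failure. */
    static size_t workspace_size(int level, int windowLog)
    {
        ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
        size_t est;
        if (p == NULL) return (size_t)-1;  /* treat as allocation failure */
        ZSTD_CCtxParams_init(p, level);
        ZSTD_CCtxParams_setParameter(p, ZSTD_c_windowLog, windowLog);
        est = ZSTD_estimateCCtxSize_usingCCtxParams(p);
        ZSTD_freeCCtxParams(p);
        return est;
    }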
 */ -static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) +static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms) { ZSTD_window_clear(&ms->window); @@ -1742,12 +1991,25 @@ typedef enum { ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e; +/* Mixes the bits of a 64-bit value, based on XXH3_rrmxmx */ +static U64 ZSTD_bitmix(U64 val, U64 len) { + val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24); + val *= 0x9FB21C651E98DF25ULL; + val ^= (val >> 35) + len ; + val *= 0x9FB21C651E98DF25ULL; + return val ^ (val >> 28); +} + +/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */ +static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) { + ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4); +} static size_t -ZSTD_reset_matchState(ZSTD_matchState_t* ms, +ZSTD_reset_matchState(ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, const ZSTD_compressionParameters* cParams, - const ZSTD_paramSwitch_e useRowMatchFinder, + const ZSTD_ParamSwitch_e useRowMatchFinder, const ZSTD_compResetPolicy_e crp, const ZSTD_indexResetPolicy_e forceResetIndex, const ZSTD_resetTarget_e forWho) @@ -1769,6 +2031,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, } ms->hashLog3 = hashLog3; + ms->lazySkipping = 0; ZSTD_invalidateMatchState(ms); @@ -1790,22 +2053,19 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, ZSTD_cwksp_clean_tables(ws); } - /* opt parser space */ - if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { - DEBUGLOG(4, "reserving optimal parser space"); - ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned)); - ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned)); - ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned)); - ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned)); - ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)); - ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); - } - if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) { - { /* Row match finder needs an additional table of hashes ("tags") */ - size_t const tagTableSize = hSize*sizeof(U16); - ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize); - if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize); + /* Row match finder needs an additional table of hashes ("tags") */ + size_t const tagTableSize = hSize; + /* We want to generate a new salt in case we reset a Cctx, but we always want to use + * 0 when we reset a Cdict */ + if(forWho == ZSTD_resetTarget_CCtx) { + ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize); + ZSTD_advanceHashSalt(ms); + } else { + /* When we are not salting we want to always memset the memory */ + ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned64(ws, tagTableSize); + ZSTD_memset(ms->tagTable, 0, tagTableSize); + ms->hashSalt = 0; } { /* Switch to 32-entry rows if searchLog is 5 (or more) */ U32 const rowLog = BOUNDED(4, cParams->searchLog, 6); @@ -1814,6 +2074,17 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, } } + /* opt parser space */ + if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { + DEBUGLOG(4, "reserving optimal parser space"); + ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (1<<Litbits) * sizeof(unsigned)); + ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxLL+1) *
sizeof(unsigned)); + ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxML+1) * sizeof(unsigned)); + ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxOff+1) * sizeof(unsigned)); + ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t)); + ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t)); + } + ms->cParams = *cParams; RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, @@ -1859,7 +2130,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, { ZSTD_cwksp* const ws = &zc->workspace; DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d", - (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter); + (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->postBlockSplitter); assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); zc->isFirstBlock = 1; @@ -1871,8 +2142,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, params = &zc->appliedParams; assert(params->useRowMatchFinder != ZSTD_ps_auto); - assert(params->useBlockSplitter != ZSTD_ps_auto); + assert(params->postBlockSplitter != ZSTD_ps_auto); assert(params->ldmParams.enableLdm != ZSTD_ps_auto); + assert(params->maxBlockSize != 0); if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams); @@ -1881,9 +2153,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize)); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); - U32 const divider = (params->cParams.minMatch==3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; + size_t const blockSize = MIN(params->maxBlockSize, windowSize); + size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params)); size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered) ? ZSTD_compressBound(blockSize) + 1 : 0; @@ -1900,8 +2171,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, size_t const neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal( &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder, - buffInSize, buffOutSize, pledgedSrcSize); - int resizeWorkspace; + buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize); FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!"); @@ -1910,7 +2180,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, { /* Check if workspace is large enough, alloc a new one if needed */ int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace; int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); - resizeWorkspace = workspaceTooSmall || workspaceWasteful; + int resizeWorkspace = workspaceTooSmall || workspaceWasteful; DEBUGLOG(4, "Need %zu B workspace", neededSpace); DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); @@ -1928,15 +2198,16 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, DEBUGLOG(5, "reserving object space"); /* Statically sized space.
- * entropyWorkspace never moves, + * tmpWorkspace never moves, * though prev/next block swap places */ assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t))); zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock"); zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); - zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE); - RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); + zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, TMP_WORKSPACE_SIZE); + RETURN_ERROR_IF(zc->tmpWorkspace == NULL, memory_allocation, "couldn't allocate tmpWorkspace"); + zc->tmpWkspSize = TMP_WORKSPACE_SIZE; } } ZSTD_cwksp_clear(ws); @@ -1951,7 +2222,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->appliedParams.fParams.contentSizeFlag = 0; DEBUGLOG(4, "pledged content size : %u ; flag : %u", (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); - zc->blockSize = blockSize; + zc->blockSizeMax = blockSize; XXH64_reset(&zc->xxhState, 0); zc->stage = ZSTDcs_init; @@ -1960,13 +2231,46 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); + FORWARD_IF_ERROR(ZSTD_reset_matchState( + &zc->blockState.matchState, + ws, + &params->cParams, + params->useRowMatchFinder, + crp, + needsIndexReset, + ZSTD_resetTarget_CCtx), ""); + + zc->seqStore.sequencesStart = (SeqDef*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * sizeof(SeqDef)); + + /* ldm hash table */ + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { + /* TODO: avoid memset? */ + size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; + zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * sizeof(ldmEntry_t)); + ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); + zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * sizeof(rawSeq)); + zc->maxNbLdmSequences = maxNbLdmSeq; + + ZSTD_window_init(&zc->ldmState.window); + zc->ldmState.loadedDictEnd = 0; + } + + /* reserve space for block-level external sequences */ + if (ZSTD_hasExtSeqProd(params)) { + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + zc->extSeqBufCapacity = maxNbExternalSeq; + zc->extSeqBuf = + (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence)); + } + + /* buffers */ + + /* ZSTD_wildcopy() is used to copy into the literals buffer, * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
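That WILDCOPY_OVERLENGTH remark is the reason several buffers in this function are over-allocated: wildcopy-style copies move fixed-width chunks and may write past the requested end, trading a per-byte bounds check for a one-time slack reservation. An illustrative sketch of the idea only (the real ZSTD_wildcopy in zstd_internal.h is more elaborate):

    #include <string.h>

    /* Copies len (> 0) bytes, but may write up to CHUNK-1 bytes beyond
     * dst+len; the destination must therefore own that slack. */
    #define CHUNK 16
    static void wildcopy_sketch(void* dst, const void* src, size_t len)
    {
        size_t pos = 0;
        do {
            memcpy((char*)dst + pos, (const char*)src + pos, CHUNK);
            pos += CHUNK;
        } while (pos < len);
    }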
 */ zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH); zc->seqStore.maxNbLit = blockSize; - /* buffers */ zc->bufferedPolicy = zbuff; zc->inBuffSize = buffInSize; zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); @@ -1989,32 +2293,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); - zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); - - FORWARD_IF_ERROR(ZSTD_reset_matchState( - &zc->blockState.matchState, - ws, - &params->cParams, - params->useRowMatchFinder, - crp, - needsIndexReset, - ZSTD_resetTarget_CCtx), ""); - - /* ldm hash table */ - if (params->ldmParams.enableLdm == ZSTD_ps_enable) { - /* TODO: avoid memset? */ - size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; - zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); - ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); - zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); - zc->maxNbLdmSequences = maxNbLdmSeq; - - ZSTD_window_init(&zc->ldmState.window); - zc->ldmState.loadedDictEnd = 0; - } DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); - assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace)); + assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace)); zc->initialized = 1; @@ -2086,7 +2367,8 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, } params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, - cdict->dictContentSize, ZSTD_cpm_attachDict); + cdict->dictContentSize, ZSTD_cpm_attachDict, + params.useRowMatchFinder); params.cParams.windowLog = windowLog; params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize, @@ -2188,15 +2470,17 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, } /* copy tag table */ if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) { - size_t const tagTableSize = hSize*sizeof(U16); + size_t const tagTableSize = hSize; ZSTD_memcpy(cctx->blockState.matchState.tagTable, - cdict->matchState.tagTable, - tagTableSize); + cdict->matchState.tagTable, + tagTableSize); + cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt; } } /* Zero the hashTable3, since the cdict never fills it */ - { int const h3log = cctx->blockState.matchState.hashLog3; + assert(cctx->blockState.matchState.hashLog3 <= 31); + { U32 const h3log = cctx->blockState.matchState.hashLog3; size_t const h3Size = h3log ?
((size_t)1 << h3log) : 0; assert(cdict->matchState.hashLog3 == 0); ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); @@ -2205,8 +2489,8 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, ZSTD_cwksp_mark_tables_clean(&cctx->workspace); /* copy dictionary offsets */ - { ZSTD_matchState_t const* srcMatchState = &cdict->matchState; - ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; + { ZSTD_MatchState_t const* srcMatchState = &cdict->matchState; + ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; @@ -2264,12 +2548,13 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, /* Copy only compression parameters related to tables. */ params.cParams = srcCCtx->appliedParams.cParams; assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto); - assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto); + assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_ps_auto); assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto); params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; - params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; + params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter; params.ldmParams = srcCCtx->appliedParams.ldmParams; params.fParams = fParams; + params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize, /* loadedDictSize */ 0, ZSTDcrp_leaveDirty, zbuff); @@ -2289,7 +2574,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog) : 0; size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; - int const h3log = srcCCtx->blockState.matchState.hashLog3; + U32 const h3log = srcCCtx->blockState.matchState.hashLog3; size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0; ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable, @@ -2307,8 +2592,8 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, /* copy dictionary offsets */ { - const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState; - ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; + const ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState; + ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; @@ -2357,10 +2642,10 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa /* Protect special index values < ZSTD_WINDOW_START_INDEX. */ U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX; assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ - assert(size < (1U<<31)); /* can be casted to int */ + assert(size < (1U<<31)); /* can be cast to int */ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) - /* To validate that the table re-use logic is sound, and that we don't + /* To validate that the table reuse logic is sound, and that we don't * access table space that we haven't cleaned, we re-"poison" the table * space every time we mark it dirty. * @@ -2402,7 +2687,7 @@ static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const /*!
ZSTD_reduceIndex() : * rescale all indexes to avoid future overflow (indexes are U32) */ -static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue) +static void ZSTD_reduceIndex (ZSTD_MatchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue) { { U32 const hSize = (U32)1 << params->cParams.hashLog; ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); @@ -2429,26 +2714,32 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par /* See doc/zstd_compression_format.md for detailed format description */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr) { - const seqDef* const sequences = seqStorePtr->sequencesStart; + const SeqDef* const sequences = seqStorePtr->sequencesStart; BYTE* const llCodeTable = seqStorePtr->llCode; BYTE* const ofCodeTable = seqStorePtr->ofCode; BYTE* const mlCodeTable = seqStorePtr->mlCode; U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); U32 u; + int longOffsets = 0; assert(nbSeq <= seqStorePtr->maxNbSeq); for (u=0; u<nbSeq; u++) { U32 const llv = sequences[u].litLength; + U32 const ofCode = ZSTD_highbit32(sequences[u].offBase); U32 const mlv = sequences[u].mlBase; llCodeTable[u] = (BYTE)ZSTD_LLcode(llv); - ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase); + ofCodeTable[u] = (BYTE)ofCode; mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv); + assert(!(MEM_64bits() && ofCode >= STREAM_ACCUMULATOR_MIN)); + if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN) + longOffsets = 1; } if (seqStorePtr->longLengthType==ZSTD_llt_literalLength) llCodeTable[seqStorePtr->longLengthPos] = MaxLL; if (seqStorePtr->longLengthType==ZSTD_llt_matchLength) mlCodeTable[seqStorePtr->longLengthPos] = MaxML; + return longOffsets; } /* ZSTD_useTargetCBlockSize(): @@ -2468,9 +2759,9 @@ static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams) * Returns 1 if true, 0 otherwise. */ static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams) { - DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter); - assert(cctxParams->useBlockSplitter != ZSTD_ps_auto); - return (cctxParams->useBlockSplitter == ZSTD_ps_enable); + DEBUGLOG(5, "ZSTD_blockSplitterEnabled (postBlockSplitter=%d)", cctxParams->postBlockSplitter); + assert(cctxParams->postBlockSplitter != ZSTD_ps_auto); + return (cctxParams->postBlockSplitter == ZSTD_ps_enable); } /* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types @@ -2482,6 +2773,7 @@ typedef struct { U32 MLtype; size_t size; size_t lastCountSize; /* Accounts for bug in 1.3.4.
More detail in ZSTD_entropyCompressSeqStore_internal() */ + int longOffsets; } ZSTD_symbolEncodingTypeStats_t; /* ZSTD_buildSequencesStatistics(): @@ -2492,11 +2784,13 @@ typedef struct { * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32) */ static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, - const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, - BYTE* dst, const BYTE* const dstEnd, - ZSTD_strategy strategy, unsigned* countWorkspace, - void* entropyWorkspace, size_t entropyWkspSize) { +ZSTD_buildSequencesStatistics( + const SeqStore_t* seqStorePtr, size_t nbSeq, + const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, + BYTE* dst, const BYTE* const dstEnd, + ZSTD_strategy strategy, unsigned* countWorkspace, + void* entropyWorkspace, size_t entropyWkspSize) +{ BYTE* const ostart = dst; const BYTE* const oend = dstEnd; BYTE* op = ostart; @@ -2510,7 +2804,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, stats.lastCountSize = 0; /* convert length/distances into codes */ - ZSTD_seqToCodes(seqStorePtr); + stats.longOffsets = ZSTD_seqToCodes(seqStorePtr); assert(op <= oend); assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */ /* build CTable for Literal Lengths */ @@ -2527,7 +2821,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), - CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype, + CTable_LitLength, LLFSELog, (SymbolEncodingType_e)stats.LLtype, countWorkspace, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, prevEntropy->litlengthCTable, @@ -2548,7 +2842,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, size_t const mostFrequent = HIST_countFast_wksp( countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ - ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; + ZSTD_DefaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; DEBUGLOG(5, "Building OF table"); nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, @@ -2559,7 +2853,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), - CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype, + CTable_OffsetBits, OffFSELog, (SymbolEncodingType_e)stats.Offtype, countWorkspace, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, prevEntropy->offcodeCTable, @@ -2589,7 +2883,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), - CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype, + CTable_MatchLength, MLFSELog, (SymbolEncodingType_e)stats.MLtype, countWorkspace, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, prevEntropy->matchlengthCTable, @@ -2615,22 +2909,23 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, */ #define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 MEM_STATIC size_t -ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - void* entropyWorkspace, size_t entropyWkspSize, - const int bmi2) +ZSTD_entropyCompressSeqStore_internal( + void* dst, size_t dstCapacity, + const void* literals, size_t litSize, + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* entropyWorkspace, size_t entropyWkspSize, + const int bmi2) { - const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; ZSTD_strategy const strategy = cctxParams->cParams.strategy; unsigned* count = (unsigned*)entropyWorkspace; FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; - const seqDef* const sequences = seqStorePtr->sequencesStart; - const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; + const SeqDef* const sequences = seqStorePtr->sequencesStart; + const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; @@ -2638,6 +2933,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t lastCountSize; + int longOffsets = 0; entropyWorkspace = count + (MaxSeq + 1); entropyWkspSize -= (MaxSeq + 1) * sizeof(*count); @@ -2647,20 +2943,18 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, assert(entropyWkspSize >= HUF_WORKSPACE_SIZE); /* Compress literals */ - { const BYTE* const literals = seqStorePtr->litStart; - size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart; - size_t const numLiterals = seqStorePtr->lit - 
seqStorePtr->litStart; + { size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); /* Base suspicion of uncompressibility on ratio of literals to sequences */ - unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); - size_t const litSize = (size_t)(seqStorePtr->lit - literals); + int const suspectUncompressible = (numSequences == 0) || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); + size_t const cSize = ZSTD_compressLiterals( - &prevEntropy->huf, &nextEntropy->huf, - cctxParams->cParams.strategy, - ZSTD_literalsCompressionIsDisabled(cctxParams), op, dstCapacity, literals, litSize, entropyWorkspace, entropyWkspSize, - bmi2, suspectUncompressible); + &prevEntropy->huf, &nextEntropy->huf, + cctxParams->cParams.strategy, + ZSTD_literalsCompressionIsDisabled(cctxParams), + suspectUncompressible, bmi2); FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); assert(cSize <= dstCapacity); op += cSize; @@ -2698,6 +2992,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2)); lastCountSize = stats.lastCountSize; op += stats.size; + longOffsets = stats.longOffsets; } { size_t const bitstreamSize = ZSTD_encodeSequences( @@ -2731,106 +3026,146 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, return (size_t)(op - ostart); } -MEM_STATIC size_t -ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - size_t srcSize, - void* entropyWorkspace, size_t entropyWkspSize, - int bmi2) +static size_t +ZSTD_entropyCompressSeqStore_wExtLitBuffer( + void* dst, size_t dstCapacity, + const void* literals, size_t litSize, + size_t blockSize, + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* entropyWorkspace, size_t entropyWkspSize, + int bmi2) { size_t const cSize = ZSTD_entropyCompressSeqStore_internal( - seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity, + literals, litSize, + seqStorePtr, prevEntropy, nextEntropy, cctxParams, entropyWorkspace, entropyWkspSize, bmi2); if (cSize == 0) return 0; /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. */ - if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) { + if ((cSize == ERROR(dstSize_tooSmall)) & (blockSize <= dstCapacity)) { DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity); return 0; /* block not compressed */ } FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed"); /* Check compressibility */ - { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); + { size_t const maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); + /* libzstd decoder before > v1.5.4 is not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly. + * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above. 
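The compressibility guard above has simple arithmetic behind it. Assuming ZSTD_minGain() keeps its long-standing shape of (srcSize >> minlog) + 2, with minlog = 6 below btultra, a compressed block is kept only when it beats the raw block by that margin; for a 128 KB block that means saving at least 2051 bytes. A hedged sketch:

    /* Emit the compressed block only if it undercuts the raw block by the
     * strategy-dependent minimum gain; otherwise store the block raw.
     * Assumes blockSize > minGain, which holds for real block sizes. */
    static int worth_emitting_compressed(size_t cSize, size_t blockSize, int strategy)
    {
        unsigned const minlog = (strategy >= 8 /* ZSTD_btultra */) ? (unsigned)(strategy - 1) : 6;
        size_t const minGain = (blockSize >> minlog) + 2;
        return cSize < blockSize - minGain;
    }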
+ */ + assert(cSize < ZSTD_BLOCKSIZE_MAX); return cSize; } +static size_t +ZSTD_entropyCompressSeqStore( + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + size_t srcSize, + void* entropyWorkspace, size_t entropyWkspSize, + int bmi2) +{ + return ZSTD_entropyCompressSeqStore_wExtLitBuffer( + dst, dstCapacity, + seqStorePtr->litStart, (size_t)(seqStorePtr->lit - seqStorePtr->litStart), + srcSize, + seqStorePtr, + prevEntropy, nextEntropy, + cctxParams, + entropyWorkspace, entropyWkspSize, + bmi2); +} + /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) +ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) { - static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { + static const ZSTD_BlockCompressor_f blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { { ZSTD_compressBlock_fast /* default for 0 */, ZSTD_compressBlock_fast, - ZSTD_compressBlock_doubleFast, - ZSTD_compressBlock_greedy, - ZSTD_compressBlock_lazy, - ZSTD_compressBlock_lazy2, - ZSTD_compressBlock_btlazy2, - ZSTD_compressBlock_btopt, - ZSTD_compressBlock_btultra, - ZSTD_compressBlock_btultra2 }, + ZSTD_COMPRESSBLOCK_DOUBLEFAST, + ZSTD_COMPRESSBLOCK_GREEDY, + ZSTD_COMPRESSBLOCK_LAZY, + ZSTD_COMPRESSBLOCK_LAZY2, + ZSTD_COMPRESSBLOCK_BTLAZY2, + ZSTD_COMPRESSBLOCK_BTOPT, + ZSTD_COMPRESSBLOCK_BTULTRA, + ZSTD_COMPRESSBLOCK_BTULTRA2 + }, { ZSTD_compressBlock_fast_extDict /* default for 0 */, ZSTD_compressBlock_fast_extDict, - ZSTD_compressBlock_doubleFast_extDict, - ZSTD_compressBlock_greedy_extDict, - ZSTD_compressBlock_lazy_extDict, - ZSTD_compressBlock_lazy2_extDict, - ZSTD_compressBlock_btlazy2_extDict, - ZSTD_compressBlock_btopt_extDict, - ZSTD_compressBlock_btultra_extDict, - ZSTD_compressBlock_btultra_extDict }, + ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT, + ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT, + ZSTD_COMPRESSBLOCK_LAZY_EXTDICT, + ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT, + ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT, + ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT, + ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT, + ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT + }, { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */, ZSTD_compressBlock_fast_dictMatchState, - ZSTD_compressBlock_doubleFast_dictMatchState, - ZSTD_compressBlock_greedy_dictMatchState, - ZSTD_compressBlock_lazy_dictMatchState, - ZSTD_compressBlock_lazy2_dictMatchState, - ZSTD_compressBlock_btlazy2_dictMatchState, - ZSTD_compressBlock_btopt_dictMatchState, - ZSTD_compressBlock_btultra_dictMatchState, - ZSTD_compressBlock_btultra_dictMatchState }, + ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE, + ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE + }, { NULL /* default for 0 */, NULL, NULL, - ZSTD_compressBlock_greedy_dedicatedDictSearch, - ZSTD_compressBlock_lazy_dedicatedDictSearch, - ZSTD_compressBlock_lazy2_dedicatedDictSearch, + ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH, + 
ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH, + ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH, NULL, NULL, NULL, NULL } }; - ZSTD_blockCompressor selectedCompressor; + ZSTD_BlockCompressor_f selectedCompressor; ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); - assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); - DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); + assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); + DEBUGLOG(5, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) { - static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = { - { ZSTD_compressBlock_greedy_row, - ZSTD_compressBlock_lazy_row, - ZSTD_compressBlock_lazy2_row }, - { ZSTD_compressBlock_greedy_extDict_row, - ZSTD_compressBlock_lazy_extDict_row, - ZSTD_compressBlock_lazy2_extDict_row }, - { ZSTD_compressBlock_greedy_dictMatchState_row, - ZSTD_compressBlock_lazy_dictMatchState_row, - ZSTD_compressBlock_lazy2_dictMatchState_row }, - { ZSTD_compressBlock_greedy_dedicatedDictSearch_row, - ZSTD_compressBlock_lazy_dedicatedDictSearch_row, - ZSTD_compressBlock_lazy2_dedicatedDictSearch_row } + static const ZSTD_BlockCompressor_f rowBasedBlockCompressors[4][3] = { + { + ZSTD_COMPRESSBLOCK_GREEDY_ROW, + ZSTD_COMPRESSBLOCK_LAZY_ROW, + ZSTD_COMPRESSBLOCK_LAZY2_ROW + }, + { + ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW, + ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW, + ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW + }, + { + ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW, + ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW, + ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW + }, + { + ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW, + ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW, + ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW + } }; - DEBUGLOG(4, "Selecting a row-based matchfinder"); + DEBUGLOG(5, "Selecting a row-based matchfinder"); assert(useRowMatchFinder != ZSTD_ps_auto); selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy]; } else { @@ -2840,25 +3175,119 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS return selectedCompressor; } -static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, +static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr, const BYTE* anchor, size_t lastLLSize) { ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } -void ZSTD_resetSeqStore(seqStore_t* ssPtr) +void ZSTD_resetSeqStore(SeqStore_t* ssPtr) { ssPtr->lit = ssPtr->litStart; ssPtr->sequences = ssPtr->sequencesStart; ssPtr->longLengthType = ZSTD_llt_none; } -typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; +/* ZSTD_postProcessSequenceProducerResult() : + * Validates and post-processes sequences obtained through the external matchfinder API: + * - Checks whether nbExternalSeqs represents an error condition. + * - Appends a block delimiter to outSeqs if one is not already present. + * See zstd.h for context regarding block delimiters. + * Returns the number of sequences after post-processing, or an error code. 
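 * (Editor's illustration, values hypothetical: a block delimiter is a
 * sequence with offset == 0 and matchLength == 0, whose litLength covers
 * any trailing literals. A producer returning
 *     { {.offset=100, .litLength=5, .matchLength=10, .rep=0},
 *       {.offset=0,   .litLength=3, .matchLength=0,  .rep=0} }
 * already ends with a delimiter and passes through unchanged; a producer
 * returning only the first sequence gets a zeroed delimiter appended here,
 * which is why outSeqs must be sized via ZSTD_sequenceBound(srcSize).)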
*/ +static size_t ZSTD_postProcessSequenceProducerResult( + ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize +) { + RETURN_ERROR_IF( + nbExternalSeqs > outSeqsCapacity, + sequenceProducer_failed, + "External sequence producer returned error code %lu", + (unsigned long)nbExternalSeqs + ); + + RETURN_ERROR_IF( + nbExternalSeqs == 0 && srcSize > 0, + sequenceProducer_failed, + "Got zero sequences from external sequence producer for a non-empty src buffer!" + ); + + if (srcSize == 0) { + ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence)); + return 1; + } + + { + ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1]; + + /* We can return early if lastSeq is already a block delimiter. */ + if (lastSeq.offset == 0 && lastSeq.matchLength == 0) { + return nbExternalSeqs; + } + + /* This error condition is only possible if the external matchfinder + * produced an invalid parse, by definition of ZSTD_sequenceBound(). */ + RETURN_ERROR_IF( + nbExternalSeqs == outSeqsCapacity, + sequenceProducer_failed, + "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!" + ); + + /* lastSeq is not a block delimiter, so we need to append one. */ + ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence)); + return nbExternalSeqs + 1; + } +} + +/* ZSTD_fastSequenceLengthSum() : + * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. + * Similar to another function in zstd_compress.c (determine_blockSize), + * except it doesn't check for a block delimiter to end summation. + * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). + * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */ +static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) { + size_t matchLenSum, litLenSum, i; + matchLenSum = 0; + litLenSum = 0; + for (i = 0; i < seqBufSize; i++) { + litLenSum += seqBuf[i].litLength; + matchLenSum += seqBuf[i].matchLength; + } + return litLenSum + matchLenSum; +} + +/** + * Function to validate sequences produced by a block compressor. + */ +static void ZSTD_validateSeqStore(const SeqStore_t* seqStore, const ZSTD_compressionParameters* cParams) +{ +#if DEBUGLEVEL >= 1 + const SeqDef* seq = seqStore->sequencesStart; + const SeqDef* const seqEnd = seqStore->sequences; + size_t const matchLenLowerBound = cParams->minMatch == 3 ? 
3 : 4; + for (; seq < seqEnd; ++seq) { + const ZSTD_SequenceLength seqLength = ZSTD_getSequenceLength(seqStore, seq); + assert(seqLength.matchLength >= matchLenLowerBound); + (void)seqLength; + (void)matchLenLowerBound; + } +#else + (void)seqStore; + (void)cParams; +#endif +} + +static size_t +ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, + ZSTD_SequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, + ZSTD_ParamSwitch_e externalRepSearch); + +typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_BuildSeqStore_e; static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) { - ZSTD_matchState_t* const ms = &zc->blockState.matchState; + ZSTD_MatchState_t* const ms = &zc->blockState.matchState; DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize); assert(srcSize <= ZSTD_BLOCKSIZE_MAX); /* Assert that we have correctly flushed the ctx params into the ms's copy */ @@ -2901,6 +3330,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) } if (zc->externSeqStore.pos < zc->externSeqStore.size) { assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); + + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( + ZSTD_hasExtSeqProd(&zc->appliedParams), + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." + ); + /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, @@ -2910,7 +3348,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) src, srcSize); assert(zc->externSeqStore.pos <= zc->externSeqStore.size); } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { - rawSeqStore_t ldmSeqStore = kNullRawSeqStore; + RawSeqStore_t ldmSeqStore = kNullRawSeqStore; + + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( + ZSTD_hasExtSeqProd(&zc->appliedParams), + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." 
+ ); ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; @@ -2926,42 +3372,116 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) zc->appliedParams.useRowMatchFinder, src, srcSize); assert(ldmSeqStore.pos == ldmSeqStore.size); - } else { /* not long range mode */ - ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, - zc->appliedParams.useRowMatchFinder, - dictMode); + } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) { + assert( + zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize) + ); + assert(zc->appliedParams.extSeqProdFunc != NULL); + + { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog; + + size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)( + zc->appliedParams.extSeqProdState, + zc->extSeqBuf, + zc->extSeqBufCapacity, + src, srcSize, + NULL, 0, /* dict and dictSize, currently not supported */ + zc->appliedParams.compressionLevel, + windowSize + ); + + size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( + zc->extSeqBuf, + nbExternalSeqs, + zc->extSeqBufCapacity, + srcSize + ); + + /* Return early if there is no error, since we don't need to worry about last literals */ + if (!ZSTD_isError(nbPostProcessedSeqs)) { + ZSTD_SequencePosition seqPos = {0,0,0}; + size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs); + RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!"); + FORWARD_IF_ERROR( + ZSTD_transferSequences_wBlockDelim( + zc, &seqPos, + zc->extSeqBuf, nbPostProcessedSeqs, + src, srcSize, + zc->appliedParams.searchForExternalRepcodes + ), + "Failed to copy external sequences to seqStore!" + ); + ms->ldmSeqStore = NULL; + DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs); + return ZSTDbss_compress; + } + + /* Propagate the error if fallback is disabled */ + if (!zc->appliedParams.enableMatchFinderFallback) { + return nbPostProcessedSeqs; + } + + /* Fallback to software matchfinder */ + { ZSTD_BlockCompressor_f const blockCompressor = + ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode); + ms->ldmSeqStore = NULL; + DEBUGLOG( + 5, + "External sequence producer returned error code %lu. 
Falling back to internal parser.", + (unsigned long)nbExternalSeqs + ); + lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + } } + } else { /* not long range mode and no external matchfinder */ + ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode); ms->ldmSeqStore = NULL; lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); } { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); } } + ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams); return ZSTDbss_compress; } -static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) +static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const SeqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM]) { - const seqStore_t* seqStore = ZSTD_getSeqStore(zc); - const seqDef* seqStoreSeqs = seqStore->sequencesStart; - size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs; - size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart); - size_t literalsRead = 0; - size_t lastLLSize; + const SeqDef* inSeqs = seqStore->sequencesStart; + const size_t nbInSequences = (size_t)(seqStore->sequences - inSeqs); + const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart); - ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex]; + ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex; + const size_t nbOutSequences = nbInSequences + 1; + size_t nbOutLiterals = 0; + Repcodes_t repcodes; size_t i; - repcodes_t updatedRepcodes; - - assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences); - /* Ensure we have enough space for last literals "sequence" */ - assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1); - ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); - for (i = 0; i < seqStoreSeqSize; ++i) { - U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM; - outSeqs[i].litLength = seqStoreSeqs[i].litLength; - outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH; + + /* Bounds check that we have enough space for every input sequence + * and the block delimiter + */ + assert(seqCollector->seqIndex <= seqCollector->maxSequences); + RETURN_ERROR_IF( + nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex), + dstSize_tooSmall, + "Not enough space to copy sequences"); + + ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes)); + for (i = 0; i < nbInSequences; ++i) { + U32 rawOffset; + outSeqs[i].litLength = inSeqs[i].litLength; + outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH; outSeqs[i].rep = 0; + /* Handle the possible single length >= 64K + * There can only be one because we add MINMATCH to every match length, + * and blocks are at most 128K. + */ if (i == seqStore->longLengthPos) { if (seqStore->longLengthType == ZSTD_llt_literalLength) { outSeqs[i].litLength += 0x10000; @@ -2970,50 +3490,75 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) } } - if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) { - /* Derive the correct offset corresponding to a repcode */ - outSeqs[i].rep = seqStoreSeqs[i].offBase; + /* Determine the raw offset given the offBase, which may be a repcode. 
*/ + if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) { + const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase); + assert(repcode > 0); + outSeqs[i].rep = repcode; if (outSeqs[i].litLength != 0) { - rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1]; + rawOffset = repcodes.rep[repcode - 1]; } else { - if (outSeqs[i].rep == 3) { - rawOffset = updatedRepcodes.rep[0] - 1; + if (repcode == 3) { + assert(repcodes.rep[0] > 1); + rawOffset = repcodes.rep[0] - 1; } else { - rawOffset = updatedRepcodes.rep[outSeqs[i].rep]; + rawOffset = repcodes.rep[repcode]; } } + } else { + rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase); } outSeqs[i].offset = rawOffset; - /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode - so we provide seqStoreSeqs[i].offset - 1 */ - ZSTD_updateRep(updatedRepcodes.rep, - seqStoreSeqs[i].offBase, - seqStoreSeqs[i].litLength == 0); - literalsRead += outSeqs[i].litLength; + + /* Update repcode history for the sequence */ + ZSTD_updateRep(repcodes.rep, + inSeqs[i].offBase, + inSeqs[i].litLength == 0); + + nbOutLiterals += outSeqs[i].litLength; } /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0. * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker * for the block boundary, according to the API. */ - assert(seqStoreLiteralsSize >= literalsRead); - lastLLSize = seqStoreLiteralsSize - literalsRead; - outSeqs[i].litLength = (U32)lastLLSize; - outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0; - seqStoreSeqSize++; - zc->seqCollector.seqIndex += seqStoreSeqSize; + assert(nbInLiterals >= nbOutLiterals); + { + const size_t lastLLSize = nbInLiterals - nbOutLiterals; + outSeqs[nbInSequences].litLength = (U32)lastLLSize; + outSeqs[nbInSequences].matchLength = 0; + outSeqs[nbInSequences].offset = 0; + assert(nbOutSequences == nbInSequences + 1); + } + seqCollector->seqIndex += nbOutSequences; + assert(seqCollector->seqIndex <= seqCollector->maxSequences); + + return 0; } size_t ZSTD_sequenceBound(size_t srcSize) { - return (srcSize / ZSTD_MINMATCH_MIN) + 1; + const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1; + const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1; + return maxNbSeq + maxNbDelims; } size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, size_t outSeqsSize, const void* src, size_t srcSize) { const size_t dstCapacity = ZSTD_compressBound(srcSize); - void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); + void* dst; /* Make C90 happy. 
*/ SeqCollector seqCollector; + { + int targetCBlockSize; + FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), ""); + RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0"); + } + { + int nbWorkers; + FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), ""); + RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0"); + } + dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!"); seqCollector.collectSequences = 1; @@ -3022,8 +3567,12 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, seqCollector.maxSequences = outSeqsSize; zc->seqCollector = seqCollector; - ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); - ZSTD_customFree(dst, ZSTD_defaultCMem); + { + const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); + ZSTD_customFree(dst, ZSTD_defaultCMem); + FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed"); + } + assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize)); return zc->seqCollector.seqIndex; } @@ -3052,19 +3601,17 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) { const size_t unrollMask = unrollSize - 1; const size_t prefixLength = length & unrollMask; size_t i; - size_t u; if (length == 1) return 1; /* Check if prefix is RLE first before using unrolled loop */ if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) { return 0; } for (i = prefixLength; i != length; i += unrollSize) { + size_t u; for (u = 0; u < unrollSize; u += sizeof(size_t)) { if (MEM_readST(ip + i + u) != valueST) { return 0; - } - } - } + } } } return 1; } @@ -3072,7 +3619,7 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) { * This is just a heuristic based on the compressibility. * It may return both false positives and false negatives. */ -static int ZSTD_maybeRLE(seqStore_t const* seqStore) +static int ZSTD_maybeRLE(SeqStore_t const* seqStore) { size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart); @@ -3080,7 +3627,8 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore) return nbSeqs < 4 && nbLits < 10; } -static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) +static void +ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) { ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock; bs->prevCBlock = bs->nextCBlock; @@ -3088,12 +3636,14 @@ static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* c } /* Writes the block header */ -static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) { +static void +writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) +{ U32 const cBlockHeader = cSize == 1 ? lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); MEM_writeLE24(op, cBlockHeader); - DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock); + DEBUGLOG(5, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock); } /** ZSTD_buildBlockEntropyStats_literals() : @@ -3101,13 +3651,16 @@ static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastB * Stores literals block type (raw, rle, compressed, repeat) and * huffman description table to hufMetadata. 
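 * (Editor's summary of the decision ladder implemented below:
 *     set_basic      : store literals raw (input too small, or no gain)
 *     set_rle        : one repeated symbol fills the whole input
 *     set_repeat     : revalidate and reuse the previous Huffman table
 *     set_compressed : build a fresh Huffman table and write its description)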
* Requires ENTROPY_WORKSPACE_SIZE workspace - * @return : size of huffman description table or error code */ -static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, - const ZSTD_hufCTables_t* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_hufCTablesMetadata_t* hufMetadata, - const int literalsCompressionIsDisabled, - void* workspace, size_t wkspSize) + * @return : size of huffman description table, or an error code + */ +static size_t +ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + const int literalsCompressionIsDisabled, + void* workspace, size_t wkspSize, + int hufFlags) { BYTE* const wkspStart = (BYTE*)workspace; BYTE* const wkspEnd = wkspStart + wkspSize; @@ -3132,74 +3685,77 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi /* small ? don't even attempt compression (speed opt) */ #ifndef COMPRESS_LITERALS_SIZE_MIN -#define COMPRESS_LITERALS_SIZE_MIN 63 +# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */ #endif { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; if (srcSize <= minLitSize) { DEBUGLOG(5, "set_basic - too small"); hufMetadata->hType = set_basic; return 0; - } - } + } } /* Scan input and build symbol stats */ - { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize); + { size_t const largest = + HIST_count_wksp (countWksp, &maxSymbolValue, + (const BYTE*)src, srcSize, + workspace, wkspSize); FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); if (largest == srcSize) { + /* only one literal symbol */ DEBUGLOG(5, "set_rle"); hufMetadata->hType = set_rle; return 0; } if (largest <= (srcSize >> 7)+4) { + /* heuristic: likely not compressible */ DEBUGLOG(5, "set_basic - no gain"); hufMetadata->hType = set_basic; return 0; - } - } + } } /* Validate the previous Huffman table */ - if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { + if (repeat == HUF_repeat_check + && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { repeat = HUF_repeat_none; } /* Build Huffman Tree */ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags); assert(huffLog <= LitHufLog); { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); huffLog = (U32)maxBits; - { /* Build and write the CTable */ - size_t const newCSize = HUF_estimateCompressedSize( - (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); - size_t const hSize = HUF_writeCTable_wksp( - hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), - (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, - nodeWksp, nodeWkspSize); - /* Check against repeating the previous CTable */ - if (repeat != HUF_repeat_none) { - size_t const oldCSize = HUF_estimateCompressedSize( - (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); - if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { - DEBUGLOG(5, "set_repeat - smaller"); - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - 
hufMetadata->hType = set_repeat; - return 0; - } - } - if (newCSize + hSize >= srcSize) { - DEBUGLOG(5, "set_basic - no gains"); + } + { /* Build and write the CTable */ + size_t const newCSize = HUF_estimateCompressedSize( + (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); + size_t const hSize = HUF_writeCTable_wksp( + hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), + (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, + nodeWksp, nodeWkspSize); + /* Check against repeating the previous CTable */ + if (repeat != HUF_repeat_none) { + size_t const oldCSize = HUF_estimateCompressedSize( + (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); + if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { + DEBUGLOG(5, "set_repeat - smaller"); ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - hufMetadata->hType = set_basic; + hufMetadata->hType = set_repeat; return 0; - } - DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); - hufMetadata->hType = set_compressed; - nextHuf->repeatMode = HUF_repeat_check; - return hSize; + } } + if (newCSize + hSize >= srcSize) { + DEBUGLOG(5, "set_basic - no gains"); + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + hufMetadata->hType = set_basic; + return 0; } + DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); + hufMetadata->hType = set_compressed; + nextHuf->repeatMode = HUF_repeat_check; + return hSize; } } @@ -3209,8 +3765,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi * and updates nextEntropy to the appropriate repeatMode. */ static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { - ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0}; +ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) +{ + ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0}; nextEntropy->litlength_repeatMode = FSE_repeat_none; nextEntropy->offcode_repeatMode = FSE_repeat_none; nextEntropy->matchlength_repeatMode = FSE_repeat_none; @@ -3221,16 +3778,18 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { * Builds entropy for the sequences. * Stores symbol compression modes and fse table to fseMetadata. * Requires ENTROPY_WORKSPACE_SIZE wksp. 
- * @return : size of fse tables or error code */ -static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, - const ZSTD_fseCTables_t* prevEntropy, - ZSTD_fseCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize) + * @return : size of fse tables or error code */ +static size_t +ZSTD_buildBlockEntropyStats_sequences( + const SeqStore_t* seqStorePtr, + const ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize) { ZSTD_strategy const strategy = cctxParams->cParams.strategy; - size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; + size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); BYTE* const ostart = fseMetadata->fseTablesBuffer; BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); BYTE* op = ostart; @@ -3246,9 +3805,9 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, entropyWorkspace, entropyWorkspaceSize) : ZSTD_buildDummySequencesStatistics(nextEntropy); FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); - fseMetadata->llType = (symbolEncodingType_e) stats.LLtype; - fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype; - fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype; + fseMetadata->llType = (SymbolEncodingType_e) stats.LLtype; + fseMetadata->ofType = (SymbolEncodingType_e) stats.Offtype; + fseMetadata->mlType = (SymbolEncodingType_e) stats.MLtype; fseMetadata->lastCountSize = stats.lastCountSize; return stats.size; } @@ -3257,23 +3816,28 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, /** ZSTD_buildBlockEntropyStats() : * Builds entropy for the block. * Requires workspace size ENTROPY_WORKSPACE_SIZE - * - * @return : 0 on success or error code + * @return : 0 on success, or an error code + * Note : also employed in superblock */ -size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize) -{ - size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart; +size_t ZSTD_buildBlockEntropyStats( + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize) +{ + size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); + int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD); + int const hufFlags = huf_useOptDepth ? 
HUF_flags_optimalDepth : 0; + entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, ZSTD_literalsCompressionIsDisabled(cctxParams), - workspace, wkspSize); + workspace, wkspSize, hufFlags); + FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, @@ -3286,11 +3850,12 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, } /* Returns the size estimate for the literals section (header + content) of a block */ -static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, - const ZSTD_hufCTables_t* huf, - const ZSTD_hufCTablesMetadata_t* hufMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) +static size_t +ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, + const ZSTD_hufCTables_t* huf, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) { unsigned* const countWksp = (unsigned*)workspace; unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; @@ -3312,12 +3877,13 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz } /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ -static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, - const BYTE* codeTable, size_t nbSeq, unsigned maxCode, - const FSE_CTable* fseCTable, - const U8* additionalBits, - short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, - void* workspace, size_t wkspSize) +static size_t +ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type, + const BYTE* codeTable, size_t nbSeq, unsigned maxCode, + const FSE_CTable* fseCTable, + const U8* additionalBits, + short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, + void* workspace, size_t wkspSize) { unsigned* const countWksp = (unsigned*)workspace; const BYTE* ctp = codeTable; @@ -3349,107 +3915,115 @@ static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, } /* Returns the size estimate for the sequences section (header + content) of a block */ -static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_fseCTables_t* fseTables, - const ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) +static size_t +ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_fseCTables_t* fseTables, + const ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) { size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ); size_t cSeqSizeEstimate = 0; cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff, - fseTables->offcodeCTable, NULL, - OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, - workspace, wkspSize); + fseTables->offcodeCTable, NULL, + OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL, - fseTables->litlengthCTable, LL_bits, - LL_defaultNorm, LL_defaultNormLog, MaxLL, - workspace, wkspSize); + 
fseTables->litlengthCTable, LL_bits, + LL_defaultNorm, LL_defaultNormLog, MaxLL, + workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML, - fseTables->matchlengthCTable, ML_bits, - ML_defaultNorm, ML_defaultNormLog, MaxML, - workspace, wkspSize); + fseTables->matchlengthCTable, ML_bits, + ML_defaultNorm, ML_defaultNormLog, MaxML, + workspace, wkspSize); if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; return cSeqSizeEstimate + sequencesSectionHeaderSize; } /* Returns the size estimate for a given stream of literals, of, ll, ml */ -static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, - const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_entropyCTables_t* entropy, - const ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize, - int writeLitEntropy, int writeSeqEntropy) { +static size_t +ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, + const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_entropyCTables_t* entropy, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize, + int writeLitEntropy, int writeSeqEntropy) +{ size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, - &entropy->huf, &entropyMetadata->hufMetadata, - workspace, wkspSize, writeLitEntropy); + &entropy->huf, &entropyMetadata->hufMetadata, + workspace, wkspSize, writeLitEntropy); size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, - nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, - workspace, wkspSize, writeSeqEntropy); + nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, + workspace, wkspSize, writeSeqEntropy); return seqSize + literalsSize + ZSTD_blockHeaderSize; } /* Builds entropy statistics and uses them for blocksize estimation. * - * Returns the estimated compressed size of the seqStore, or a zstd error. + * @return: estimated compressed size of the seqStore, or a zstd error. 
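 * (Editor's sketch of how the block splitter consumes this estimate:
 *     size_t const whole  = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullChunk, zc);
 *     size_t const first  = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalf, zc);
 *     size_t const second = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalf, zc);
 *     if (first + second < whole) recurse into both halves;
 * The estimate itself is literalsSize + seqSize + ZSTD_blockHeaderSize,
 * computed from freshly built entropy statistics.)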
*/ -static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) { - ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; +static size_t +ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx* zc) +{ + ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata; DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, entropyMetadata, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); - return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), + zc->tmpWorkspace, zc->tmpWkspSize), ""); + return ZSTD_estimateBlockSize( + seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), seqStore->ofCode, seqStore->llCode, seqStore->mlCode, (size_t)(seqStore->sequences - seqStore->sequencesStart), - &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, + &zc->blockState.nextCBlock->entropy, + entropyMetadata, + zc->tmpWorkspace, zc->tmpWkspSize, (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); } /* Returns literals bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) { +static size_t ZSTD_countSeqStoreLiteralsBytes(const SeqStore_t* const seqStore) +{ size_t literalsBytes = 0; - size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t i; for (i = 0; i < nbSeqs; ++i) { - seqDef seq = seqStore->sequencesStart[i]; + SeqDef const seq = seqStore->sequencesStart[i]; literalsBytes += seq.litLength; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) { literalsBytes += 0x10000; - } - } + } } return literalsBytes; } /* Returns match bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) { +static size_t ZSTD_countSeqStoreMatchBytes(const SeqStore_t* const seqStore) +{ size_t matchBytes = 0; - size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t i; for (i = 0; i < nbSeqs; ++i) { - seqDef seq = seqStore->sequencesStart[i]; + SeqDef seq = seqStore->sequencesStart[i]; matchBytes += seq.mlBase + MINMATCH; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) { matchBytes += 0x10000; - } - } + } } return matchBytes; } /* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx). * Stores the result in resultSeqStore. 
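 * (Editor's note: the chunk is a view, not a copy. resultSeqStore aliases
 * the original buffers and only rebases its members, conceptually:
 *     result.sequences  = original.sequencesStart + endIdx;
 *     result.litStart  += literal bytes covered by sequences [0, startIdx);
 * so candidate splits are evaluated without moving any literal or
 * sequence data.)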
*/ -static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, - const seqStore_t* originalSeqStore, +static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore, + const SeqStore_t* originalSeqStore, size_t startIdx, size_t endIdx) { *resultSeqStore = *originalSeqStore; @@ -3490,6 +4064,7 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, c U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */ assert(OFFBASE_IS_REPCODE(offBase)); if (adjustedRepCode == ZSTD_REP_NUM) { + assert(ll0); /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 * This is only valid if it results in a valid offset value, aka > 0. * Note : it may happen that `rep[0]==1` in exceptional circumstances. @@ -3515,14 +4090,17 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, c * 1-3 : repcode 1-3 * 4+ : real_offset+3 */ -static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, - seqStore_t* const seqStore, U32 const nbSeq) { +static void +ZSTD_seqStore_resolveOffCodes(Repcodes_t* const dRepcodes, Repcodes_t* const cRepcodes, + const SeqStore_t* const seqStore, U32 const nbSeq) +{ U32 idx = 0; + U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq; for (; idx < nbSeq; ++idx) { - seqDef* const seq = seqStore->sequencesStart + idx; - U32 const ll0 = (seq->litLength == 0); + SeqDef* const seq = seqStore->sequencesStart + idx; + U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx); U32 const offBase = seq->offBase; - assert(seq->offBase > 0); + assert(offBase > 0); if (OFFBASE_IS_REPCODE(offBase)) { U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0); @@ -3531,7 +4109,7 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_ * repcode history. */ if (dRawOffset != cRawOffset) { - seq->offBase = cRawOffset + ZSTD_REP_NUM; + seq->offBase = OFFSET_TO_OFFBASE(cRawOffset); } } /* Compression repcode history is always updated with values directly from the unmodified seqStore. @@ -3548,10 +4126,11 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_ * Returns the total size of that block (including header) or a ZSTD error code. 
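 * (Editor's summary of the flow below:
 *     if (isPartition) resolve offcodes against the dRep/cRep histories;
 *     cSeqsSize = ZSTD_entropyCompressSeqStore(...);
 *     cSeqsSize == 0 : emit a raw block,  restore *dRep = dRepOriginal;
 *     cSeqsSize == 1 : emit an RLE block, restore *dRep = dRepOriginal;
 *     otherwise      : write the block header, confirm the entropy tables.
 * Restoring dRep on raw/RLE output mirrors the decoder, since such blocks
 * do not advance the decoder's repcode history.)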
*/ static size_t -ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, - repcodes_t* const dRep, repcodes_t* const cRep, +ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, + const SeqStore_t* const seqStore, + Repcodes_t* const dRep, Repcodes_t* const cRep, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, + const void* src, size_t srcSize, U32 lastBlock, U32 isPartition) { const U32 rleMaxLength = 25; @@ -3561,7 +4140,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, size_t cSeqsSize; /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ - repcodes_t const dRepOriginal = *dRep; + Repcodes_t const dRepOriginal = *dRep; DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock"); if (isPartition) ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart)); @@ -3572,7 +4151,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, &zc->appliedParams, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, srcSize, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, + zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */, zc->bmi2); FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!"); @@ -3586,8 +4165,9 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, cSeqsSize = 1; } + /* Sequence collection not supported when block splitting */ if (zc->seqCollector.collectSequences) { - ZSTD_copyBlockSequences(zc); + FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed"); ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); return 0; } @@ -3595,18 +4175,18 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, if (cSeqsSize == 0) { cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); FORWARD_IF_ERROR(cSize, "Nocompress block failed"); - DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize); + DEBUGLOG(5, "Writing out nocompress block, size: %zu", cSize); *dRep = dRepOriginal; /* reset simulated decompression repcode history */ } else if (cSeqsSize == 1) { cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock); FORWARD_IF_ERROR(cSize, "RLE compress block failed"); - DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize); + DEBUGLOG(5, "Writing out RLE block, size: %zu", cSize); *dRep = dRepOriginal; /* reset simulated decompression repcode history */ } else { ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); writeBlockHeader(op, cSeqsSize, srcSize, lastBlock); cSize = ZSTD_blockHeaderSize + cSeqsSize; - DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize); + DEBUGLOG(5, "Writing out compressed block, size: %zu", cSize); } if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) @@ -3625,10 +4205,11 @@ typedef struct { /* Helper function to perform the recursive search for block splits. * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. - * If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then - * we do not recurse. + * If advantageous to split, then we recurse down the two sub-blocks. + * If not, or if an error occurred in estimation, then we do not recurse. 
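 * (Editor's sketch, mirroring the upstream logic:
 *     deriveSplits(startIdx, endIdx):
 *         if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING
 *             || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) return;
 *         midIdx = (startIdx + endIdx) / 2;
 *         if (est(startIdx,midIdx) + est(midIdx,endIdx) < est(startIdx,endIdx)) {
 *             deriveSplits(startIdx, midIdx);
 *             record midIdx as a split point;
 *             deriveSplits(midIdx, endIdx);
 *         }
 * Recording midIdx between the two recursive calls keeps the split
 * locations sorted.)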
* - * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING. + * Note: The recursion depth is capped by a heuristic minimum number of sequences, + * defined by MIN_SEQUENCES_BLOCK_SPLITTING. * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). * In practice, recursion depth usually doesn't go beyond 4. * @@ -3638,21 +4219,22 @@ typedef struct { */ static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, - ZSTD_CCtx* zc, const seqStore_t* origSeqStore) + ZSTD_CCtx* zc, const SeqStore_t* origSeqStore) { - seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; - seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; - seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; + SeqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; + SeqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; + SeqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; size_t estimatedOriginalSize; size_t estimatedFirstHalfSize; size_t estimatedSecondHalfSize; size_t midIdx = (startIdx + endIdx)/2; + DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); + assert(endIdx >= startIdx); if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { - DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences"); + DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx); return; } - DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); @@ -3673,15 +4255,18 @@ ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t end } } -/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio. +/* Base recursive function. + * Populates a table with intra-block partition indices that can improve compression ratio. * - * Returns the number of splits made (which equals the size of the partition table - 1). + * @return: number of splits made (which equals the size of the partition table - 1). */ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) { - seqStoreSplits splits = {partitions, 0}; + seqStoreSplits splits; + splits.splitLocations = partitions; + splits.idx = 0; if (nbSeq <= 4) { - DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split"); + DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq); /* Refuse to try and split anything with less than 4 sequences */ return 0; } @@ -3697,18 +4282,20 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) * Returns combined size of all blocks (which includes headers), or a ZSTD error code. 
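 * (Editor's sketch of the partition loop below:
 *     for each boundary p in partitions[0..numSplits):
 *         derive currSeqStore from zc->seqStore over [i, p);
 *         cSize += ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
 *                                                    &dRep, &cRep, ...);
 * dRep simulates the repcode history a decoder will reconstruct, since raw
 * and RLE partitions reset it, while cRep tracks the history the compressor
 * actually used; when they diverge, dRep is kept for the next block.)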
*/ static size_t -ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, - const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) +ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t blockSize, + U32 lastBlock, U32 nbSeq) { size_t cSize = 0; const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; size_t i = 0; size_t srcBytesTotal = 0; - U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ - seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore; - seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; - size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); + U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ + SeqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore; + SeqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore; + size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two @@ -3724,25 +4311,27 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac * * See ZSTD_seqStore_resolveOffCodes() for more details. */ - repcodes_t dRep; - repcodes_t cRep; - ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); - ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); - ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); + Repcodes_t dRep; + Repcodes_t cRep; + ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + ZSTD_memset(nextSeqStore, 0, sizeof(SeqStore_t)); - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate); if (numSplits == 0) { - size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, - &dRep, &cRep, - op, dstCapacity, - ip, blockSize, - lastBlock, 0 /* isPartition */); + size_t cSizeSingleBlock = + ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, + &dRep, &cRep, + op, dstCapacity, + ip, blockSize, + lastBlock, 0 /* isPartition */); FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!"); DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits"); - assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); + assert(zc->blockSizeMax <= ZSTD_BLOCKSIZE_MAX); + assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize); return cSizeSingleBlock; } @@ -3767,7 +4356,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac op, dstCapacity, ip, srcBytes, lastBlockEntireSrc, 1 /* isPartition */); - DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); + DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size", + ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!"); ip += srcBytes; @@ -3775,12 +4365,12 @@ 
ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac dstCapacity -= cSizeChunk; cSize += cSizeChunk; *currSeqStore = *nextSeqStore; - assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); + assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize); } - /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes - * for the next block. + /* cRep and dRep may have diverged during the compression. + * If so, we use the dRep repcodes for the next block. */ - ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t)); + ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t)); return cSize; } @@ -3789,21 +4379,20 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { - const BYTE* ip = (const BYTE*)src; - BYTE* op = (BYTE*)dst; U32 nbSeq; size_t cSize; - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); - assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable); + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock"); + assert(zc->appliedParams.postBlockSplitter == ZSTD_ps_enable); { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); if (bss == ZSTDbss_noCompress) { if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); + RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); + cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block"); + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock: Nocompress block"); return cSize; } nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart); @@ -3833,11 +4422,15 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc, { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); - if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; } + if (bss == ZSTDbss_noCompress) { + RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); + cSize = 0; + goto out; + } } if (zc->seqCollector.collectSequences) { - ZSTD_copyBlockSequences(zc); + FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed"); ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); return 0; } @@ -3848,7 +4441,7 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc, &zc->appliedParams, dst, dstCapacity, srcSize, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, + zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */, zc->bmi2); if (frame && @@ -3913,10 +4506,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, * * cSize >= blockBound(srcSize): We have expanded the block too much so * emit an uncompressed block. 
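 * (Editor's condensed view: the body below attempts
 *     ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock)
 * and keeps the result only when 0 < cSize < maxCSize + ZSTD_blockHeaderSize;
 * a genuine error other than dstSize_tooSmall is propagated, and every
 * other outcome falls through to ZSTD_noCompressBlock().)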
*/ - { - size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); + { size_t const cSize = + ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); if (cSize != ERROR(dstSize_tooSmall)) { - size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); + size_t const maxCSize = + srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); @@ -3924,7 +4518,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, } } } - } + } /* if (bss == ZSTDbss_compress)*/ DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); /* Superblock compression failed, attempt to emit a single no compress block. @@ -3953,7 +4547,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc, return cSize; } -static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, +static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, void const* ip, @@ -3977,41 +4571,82 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, } } +#include "zstd_preSplit.h" + +static size_t ZSTD_optimalBlockSize(ZSTD_CCtx* cctx, const void* src, size_t srcSize, size_t blockSizeMax, int splitLevel, ZSTD_strategy strat, S64 savings) +{ + /* split level based on compression strategy, from `fast` to `btultra2` */ + static const int splitLevels[] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }; + /* note: conservatively only split full blocks (128 KB) currently. + * While it's possible to go lower, let's keep it simple for a first implementation. + * Besides, benefits of splitting are reduced when blocks are already small. + */ + if (srcSize < 128 KB || blockSizeMax < 128 KB) + return MIN(srcSize, blockSizeMax); + /* do not split incompressible data though: + * require verified savings to allow pre-splitting. + * Note: as a consequence, the first full block is not split. + */ + if (savings < 3) { + DEBUGLOG(6, "don't attempt splitting: savings (%i) too low", (int)savings); + return 128 KB; + } + /* apply @splitLevel, or use default value (which depends on @strat). + * note that splitting heuristic is still conditioned by @savings >= 3, + * so the first block will not reach this code path */ + if (splitLevel == 1) return 128 KB; + if (splitLevel == 0) { + assert(ZSTD_fast <= strat && strat <= ZSTD_btultra2); + splitLevel = splitLevels[strat]; + } else { + assert(2 <= splitLevel && splitLevel <= 6); + splitLevel -= 2; + } + return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize); +} + /*! ZSTD_compress_frameChunk() : * Compress a chunk of data into one or multiple blocks. * All blocks will be terminated, all input will be consumed. * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. 
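 * (Editor's sketch of the per-block loop below:
 *     while (remaining) {
 *         blockSize = ZSTD_optimalBlockSize(cctx, ip, remaining, blockSizeMax, ...);
 *         overflow-correct the match state if needed;
 *         cSize = compress one block (raw, RLE, or compressed);
 *         savings += (S64)blockSize - (S64)cSize;
 *         ip += blockSize; remaining -= blockSize; op += cSize;
 *     } )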
* Frame is supposed already started (header already produced) -* @return : compressed size, or an error code +* @return : compressed size, or an error code */ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastFrameChunk) { - size_t blockSize = cctx->blockSize; + size_t blockSizeMax = cctx->blockSizeMax; size_t remaining = srcSize; const BYTE* ip = (const BYTE*)src; BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; + S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize; assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); - DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize); + DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax); if (cctx->appliedParams.fParams.checksumFlag && srcSize) XXH64_update(&cctx->xxhState, src, srcSize); while (remaining) { - ZSTD_matchState_t* const ms = &cctx->blockState.matchState; - U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); + ZSTD_MatchState_t* const ms = &cctx->blockState.matchState; + size_t const blockSize = ZSTD_optimalBlockSize(cctx, + ip, remaining, + blockSizeMax, + cctx->appliedParams.preBlockSplitter_level, + cctx->appliedParams.cParams.strategy, + savings); + U32 const lastBlock = lastFrameChunk & (blockSize == remaining); + assert(blockSize <= remaining); /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding * additional 1. We need to revisit and change this logic to be more consistent */ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1, dstSize_tooSmall, "not enough space to store compressed block"); - if (remaining < blockSize) blockSize = remaining; ZSTD_overflowCorrectIfNeeded( ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); @@ -4047,8 +4682,23 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, MEM_writeLE24(op, cBlockHeader); cSize += ZSTD_blockHeaderSize; } - } - + } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/ + + /* @savings is employed to ensure that splitting doesn't worsen expansion of incompressible data. + * Without splitting, the maximum expansion is 3 bytes per full block. + * An adversarial input could attempt to fudge the split detector, + * and make it split incompressible data, resulting in more block headers. + * Note that, since ZSTD_COMPRESSBOUND() assumes a worst case scenario of 1KB per block, + * and the splitter never creates blocks that small (current lower limit is 8 KB), + * there is already no risk to expand beyond ZSTD_COMPRESSBOUND() limit. + * But if the goal is to not expand by more than 3-bytes per 128 KB full block, + * then yes, it becomes possible to make the block splitter oversplit incompressible data. + * Using @savings, we enforce an even more conservative condition, + * requiring the presence of enough savings (at least 3 bytes) to authorize splitting, + * otherwise only full blocks are used. 
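To see how the @savings gate behaves, consider the running balance consumedSrcSize - producedCSize across successive full blocks. A standalone sketch with hypothetical compressed sizes: incompressible data keeps savings below 3, so splitting is never authorized for it, and the first block is never split because savings start at 0.

    #include <stdio.h>

    int main(void)
    {
        long long savings = 0;                  /* consumedSrcSize - producedCSize so far */
        long long const blockSize = 128 * 1024;
        long long const cSizes[] = { 131075, 90000, 90000 };  /* hypothetical block results */
        int i;
        for (i = 0; i < 3; i++) {
            int const maySplit = (savings >= 3);   /* the gate from ZSTD_optimalBlockSize() */
            printf("block %d: savings=%lld -> %s\n", i, savings,
                   maySplit ? "may split" : "full block");
            savings += blockSize - cSizes[i];      /* same update as in the frame loop below */
        }
        return 0;
    }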
+ * But being conservative is fine, + * since splitting barely compressible blocks is not fruitful anyway */ + savings += (S64)blockSize - (S64)cSize; ip += blockSize; assert(remaining >= blockSize); @@ -4067,8 +4717,10 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, - const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID) -{ BYTE* const op = (BYTE*)dst; + const ZSTD_CCtx_params* params, + U64 pledgedSrcSize, U32 dictID) +{ + BYTE* const op = (BYTE*)dst; U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */ U32 const checksumFlag = params->fParams.checksumFlag>0; @@ -4149,19 +4801,15 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity) } } -size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq) +void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq) { - RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong, - "wrong cctx stage"); - RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable, - parameter_unsupported, - "incompatible with ldm"); + assert(cctx->stage == ZSTDcs_init); + assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable); cctx->externSeqStore.seq = seq; cctx->externSeqStore.size = nbSeq; cctx->externSeqStore.capacity = nbSeq; cctx->externSeqStore.pos = 0; cctx->externSeqStore.posInSequence = 0; - return 0; } @@ -4170,7 +4818,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, const void* src, size_t srcSize, U32 frame, U32 lastFrameChunk) { - ZSTD_matchState_t* const ms = &cctx->blockState.matchState; + ZSTD_MatchState_t* const ms = &cctx->blockState.matchState; size_t fhSize = 0; DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u", @@ -4205,7 +4853,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, src, (BYTE const*)src + srcSize); } - DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize); + DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSizeMax); { size_t const cSize = frame ? 
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */); @@ -4226,41 +4874,62 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, } } -size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); } +/* NOTE: Must just wrap ZSTD_compressContinue_public() */ +size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); +} -size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx) { ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; assert(!ZSTD_checkCParams(cParams)); - return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); + return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog); } -size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +{ + return ZSTD_getBlockSize_deprecated(cctx); +} + +/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ +size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); - { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); + { size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx); RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); } return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); } +/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); +} + /*! ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ -static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, - ldmState_t* ls, - ZSTD_cwksp* ws, - ZSTD_CCtx_params const* params, - const void* src, size_t srcSize, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp) +static size_t +ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + ZSTD_CCtx_params const* params, + const void* src, size_t srcSize, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; @@ -4294,43 +4963,62 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ip = iend - maxDictSize; src = ip; srcSize = maxDictSize; - } } + } + } if (srcSize > ZSTD_CHUNKSIZE_MAX) { /* We must have cleared our windows when our source is this large. 
*/ assert(ZSTD_window_isEmpty(ms->window)); if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window)); } - - DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder); ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0); - ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); - ms->forceNonContiguous = params->deterministicRefPrefix; - if (loadLdmDict) { + DEBUGLOG(4, "ZSTD_loadDictionaryContent: useRowMatchFinder=%d", (int)params->useRowMatchFinder); + + if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */ + DEBUGLOG(4, "ZSTD_loadDictionaryContent: Trigger loadLdmDict"); ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0); ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base); + ZSTD_ldm_fillHashTable(ls, ip, iend, ¶ms->ldmParams); + DEBUGLOG(4, "ZSTD_loadDictionaryContent: ZSTD_ldm_fillHashTable completes"); + } + + /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */ + { U32 maxDictSize = 1U << MIN(MAX(params->cParams.hashLog + 3, params->cParams.chainLog + 1), 31); + if (srcSize > maxDictSize) { + ip = iend - maxDictSize; + src = ip; + srcSize = maxDictSize; + } } + ms->nextToUpdate = (U32)(ip - ms->window.base); + ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); + ms->forceNonContiguous = params->deterministicRefPrefix; + if (srcSize <= HASH_READ_SIZE) return 0; ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend); - if (loadLdmDict) - ZSTD_ldm_fillHashTable(ls, ip, iend, ¶ms->ldmParams); - switch(params->cParams.strategy) { case ZSTD_fast: ZSTD_fillHashTable(ms, iend, dtlm, tfp); break; case ZSTD_dfast: +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp); +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. */ +#endif break; case ZSTD_greedy: case ZSTD_lazy: case ZSTD_lazy2: +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) assert(srcSize >= HASH_READ_SIZE); if (ms->dedicatedDictSearch) { assert(ms->chainTable != NULL); @@ -4338,7 +5026,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, } else { assert(params->useRowMatchFinder != ZSTD_ps_auto); if (params->useRowMatchFinder == ZSTD_ps_enable) { - size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16); + size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog); ZSTD_memset(ms->tagTable, 0, tagTableSize); ZSTD_row_update(ms, iend-HASH_READ_SIZE); DEBUGLOG(4, "Using row-based hash table for lazy dict"); @@ -4347,14 +5035,24 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, DEBUGLOG(4, "Using chain-based hash table for lazy dict"); } } +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. */ +#endif break; case ZSTD_btlazy2: /* we want the dictionary table fully sorted */ case ZSTD_btopt: case ZSTD_btultra: case ZSTD_btultra2: +#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) assert(srcSize >= HASH_READ_SIZE); + DEBUGLOG(4, "Fill %u bytes into the Binary Tree", (unsigned)srcSize); ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend); +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. 
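The clamp above sizes the loaded dictionary suffix to what the match-finder tables can actually index. A worked sketch of the same formula, with hypothetical cParams:

    #include <stdio.h>

    #define MIN_demo(a,b) ((a) < (b) ? (a) : (b))
    #define MAX_demo(a,b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        unsigned const hashLog = 17, chainLog = 16;    /* hypothetical cParams */
        unsigned const bits = MIN_demo(MAX_demo(hashLog + 3, chainLog + 1), 31);
        printf("maxDictSize = %llu bytes\n", 1ULL << bits);   /* 1 MiB here */
        return 0;
    }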
*/ +#endif break; default: @@ -4397,20 +5095,19 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, { unsigned maxSymbolValue = 255; unsigned hasZeroWeights = 1; size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, - dictEnd-dictPtr, &hasZeroWeights); + (size_t)(dictEnd-dictPtr), &hasZeroWeights); /* We only set the loaded table as valid if it contains all non-zero * weights. Otherwise, we set it to check */ - if (!hasZeroWeights) + if (!hasZeroWeights && maxSymbolValue == 255) bs->entropy.huf.repeatMode = HUF_repeat_valid; RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, ""); - RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, ""); dictPtr += hufHeaderSize; } { unsigned offcodeLog; - size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr)); RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); /* fill all offset symbols to avoid garbage at end of table */ @@ -4425,7 +5122,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; - size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( @@ -4439,7 +5136,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; - size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( @@ -4473,7 +5170,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, ""); } } } - return dictPtr - (const BYTE*)dict; + return (size_t)(dictPtr - (const BYTE*)dict); } /* Dictionary format : @@ -4486,7 +5183,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, * dictSize supposed >= 8 */ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, - ZSTD_matchState_t* ms, + ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, @@ -4519,7 +5216,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, * @return : dictID, or an error code */ static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, - ZSTD_matchState_t* ms, + ZSTD_MatchState_t* ms, ldmState_t* ls, ZSTD_cwksp* ws, const ZSTD_CCtx_params* params, @@ -4596,11 +5293,11 @@ static 
size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, cdict->dictContentSize, cdict->dictContentType, dtlm, - ZSTD_tfp_forCCtx, cctx->entropyWorkspace) + ZSTD_tfp_forCCtx, cctx->tmpWorkspace) : ZSTD_compress_insertDictionary( cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, - dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->entropyWorkspace); + dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->tmpWorkspace); FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= UINT_MAX); cctx->dictID = (U32)dictID; @@ -4641,8 +5338,8 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, &cctxParams, pledgedSrcSize); } -size_t -ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +static size_t +ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_CCtx_params cctxParams; { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict); @@ -4653,9 +5350,15 @@ ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); } +size_t +ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +{ + return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); +} + size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) { - return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); + return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel); } @@ -4666,14 +5369,13 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) { BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; - size_t fhSize = 0; DEBUGLOG(4, "ZSTD_writeEpilogue"); RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing"); /* special case : empty frame */ if (cctx->stage == ZSTDcs_init) { - fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); + size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); dstCapacity -= fhSize; op += fhSize; @@ -4683,8 +5385,9 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) if (cctx->stage != ZSTDcs_ending) { /* write one last empty block, make it the "last" block */ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; - RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue"); - MEM_writeLE32(op, cBlockHeader24); + ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3); + RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue"); + MEM_writeLE24(op, cBlockHeader24); op += ZSTD_blockHeaderSize; dstCapacity -= ZSTD_blockHeaderSize; } @@ -4698,7 +5401,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) } cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ - return op-ostart; + return (size_t)(op-ostart); } void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) @@ -4725,9 +5428,9 @@ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) #endif } -size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const 
void* src, size_t srcSize) +size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) { size_t endResult; size_t const cSize = ZSTD_compressContinue_internal(cctx, @@ -4751,6 +5454,14 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, return cSize + endResult; } +/* NOTE: Must just wrap ZSTD_compressEnd_public() */ +size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); +} + size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, @@ -4779,7 +5490,7 @@ size_t ZSTD_compress_advanced_internal( FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, params, srcSize, ZSTDb_not_buffered) , ""); - return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); } size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, @@ -4914,14 +5625,16 @@ static size_t ZSTD_initCDict_internal( return 0; } -static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_compressionParameters cParams, - ZSTD_paramSwitch_e useRowMatchFinder, - U32 enableDedicatedDictSearch, - ZSTD_customMem customMem) +static ZSTD_CDict* +ZSTD_createCDict_advanced_internal(size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_compressionParameters cParams, + ZSTD_ParamSwitch_e useRowMatchFinder, + int enableDedicatedDictSearch, + ZSTD_customMem customMem) { if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; + DEBUGLOG(3, "ZSTD_createCDict_advanced_internal (dictSize=%u)", (unsigned)dictSize); { size_t const workspaceSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + @@ -4958,6 +5671,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, { ZSTD_CCtx_params cctxParams; ZSTD_memset(&cctxParams, 0, sizeof(cctxParams)); + DEBUGLOG(3, "ZSTD_createCDict_advanced, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType); ZSTD_CCtxParams_init(&cctxParams, 0); cctxParams.cParams = cParams; cctxParams.customMem = customMem; @@ -4978,7 +5692,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2( ZSTD_compressionParameters cParams; ZSTD_CDict* cdict; - DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType); + DEBUGLOG(3, "ZSTD_createCDict_advanced2, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType); if (!customMem.customAlloc ^ !customMem.customFree) return NULL; if (cctxParams.enableDedicatedDictSearch) { @@ -4997,7 +5711,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2( &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); } - DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch); + DEBUGLOG(3, "ZSTD_createCDict_advanced2: DedicatedDictSearch=%u", cctxParams.enableDedicatedDictSearch); cctxParams.cParams = cParams; cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); @@ -5006,7 +5720,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2( cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch, customMem); - if (ZSTD_isError( ZSTD_initCDict_internal(cdict, + if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict, dict, dictSize, dictLoadMethod, dictContentType, cctxParams) )) { @@ -5060,7 +5774,7 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict) * 
workspaceSize: Use ZSTD_estimateCDictSize() * to determine how large workspace must be. * cParams : use ZSTD_getCParams() to transform a compression level - * into its relevants cParams. + * into its relevant cParams. * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) * Note : there is no corresponding "free" function. * Since workspace was allocated externally, it must be freed externally. @@ -5072,7 +5786,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict( ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) { - ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); + ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0); size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) @@ -5083,6 +5797,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict( ZSTD_CDict* cdict; ZSTD_CCtx_params params; + DEBUGLOG(4, "ZSTD_initStaticCDict (dictSize==%u)", (unsigned)dictSize); if ((size_t)workspace & 7) return NULL; /* 8-aligned */ { @@ -5093,14 +5808,13 @@ const ZSTD_CDict* ZSTD_initStaticCDict( ZSTD_cwksp_move(&cdict->workspace, &ws); } - DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u", - (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize)); if (workspaceSize < neededSize) return NULL; ZSTD_CCtxParams_init(¶ms, 0); params.cParams = cParams; params.useRowMatchFinder = useRowMatchFinder; cdict->useRowMatchFinder = useRowMatchFinder; + cdict->compressionLevel = ZSTD_NO_CLEVEL; if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dict, dictSize, @@ -5180,12 +5894,17 @@ size_t ZSTD_compressBegin_usingCDict_advanced( /* ZSTD_compressBegin_usingCDict() : * cdict must be != NULL */ -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); } +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ + return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict); +} + /*! ZSTD_compress_usingCDict_internal(): * Implementation of various ZSTD_compress_usingCDict* functions. */ @@ -5195,7 +5914,7 @@ static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) { FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ - return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); } /*! 
ZSTD_compress_usingCDict_advanced(): @@ -5261,7 +5980,7 @@ size_t ZSTD_CStreamOutSize(void) return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; } -static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize) +static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize) { if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) return ZSTD_cpm_attachDict; @@ -5393,11 +6112,11 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) { if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { - return cctx->blockSize - cctx->stableIn_notConsumed; + return cctx->blockSizeMax - cctx->stableIn_notConsumed; } assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered); { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; - if (hintInSize==0) hintInSize = cctx->blockSize; + if (hintInSize==0) hintInSize = cctx->blockSizeMax; return hintInSize; } } @@ -5424,7 +6143,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) { assert(input->pos >= zcs->stableIn_notConsumed); input->pos -= zcs->stableIn_notConsumed; - ip -= zcs->stableIn_notConsumed; + if (ip) ip -= zcs->stableIn_notConsumed; zcs->stableIn_notConsumed = 0; } if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { @@ -5449,12 +6168,13 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, case zcss_load: if ( (flushMode == ZSTD_e_end) - && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */ + && ( (size_t)(oend-op) >= ZSTD_compressBound((size_t)(iend-ip)) /* Enough output space */ || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */ && (zcs->inBuffPos == 0) ) { /* shortcut to compression pass directly into output buffer */ - size_t const cSize = ZSTD_compressEnd(zcs, - op, oend-op, ip, iend-ip); + size_t const cSize = ZSTD_compressEnd_public(zcs, + op, (size_t)(oend-op), + ip, (size_t)(iend-ip)); DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); ip = iend; @@ -5468,7 +6188,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; size_t const loaded = ZSTD_limitCopy( zcs->inBuff + zcs->inBuffPos, toLoad, - ip, iend-ip); + ip, (size_t)(iend-ip)); zcs->inBuffPos += loaded; if (ip) ip += loaded; if ( (flushMode == ZSTD_e_continue) @@ -5484,7 +6204,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, } else { assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable); if ( (flushMode == ZSTD_e_continue) - && ( (size_t)(iend - ip) < zcs->blockSize) ) { + && ( (size_t)(iend - ip) < zcs->blockSizeMax) ) { /* can't compress a full block : stop here */ zcs->stableIn_notConsumed = (size_t)(iend - ip); ip = iend; /* pretend to have consumed input */ @@ -5501,9 +6221,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered); void* cDst; size_t cSize; - size_t oSize = oend-op; + size_t oSize = (size_t)(oend-op); size_t const iSize = inputBuffered ? 
zcs->inBuffPos - zcs->inToCompress - : MIN((size_t)(iend - ip), zcs->blockSize); + : MIN((size_t)(iend - ip), zcs->blockSizeMax); if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) cDst = op; /* compress into output buffer, to skip flush stage */ else @@ -5511,16 +6231,16 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, if (inputBuffered) { unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); cSize = lastBlock ? - ZSTD_compressEnd(zcs, cDst, oSize, + ZSTD_compressEnd_public(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : - ZSTD_compressContinue(zcs, cDst, oSize, + ZSTD_compressContinue_public(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); zcs->frameEnded = lastBlock; /* prepare next block */ - zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; if (zcs->inBuffTarget > zcs->inBuffSize) - zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; + zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSizeMax; DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); if (!lastBlock) @@ -5529,8 +6249,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, } else { /* !inputBuffered, hence ZSTD_bm_stable */ unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend); cSize = lastBlock ? - ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) : - ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize); + ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) : + ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); /* Consume the input prior to error checking to mirror buffered mode. */ if (ip) ip += iSize; FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); @@ -5584,8 +6304,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, } } - input->pos = ip - istart; - output->pos = op - ostart; + input->pos = (size_t)(ip - istart); + output->pos = (size_t)(op - ostart); if (zcs->frameEnded) return 0; return ZSTD_nextInputSizeHint(zcs); } @@ -5645,6 +6365,11 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx, return 0; } +/* + * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize. + * Otherwise, it's ignored. + * @return: 0 on success, or a ZSTD_error code otherwise. + */ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, ZSTD_EndDirective endOp, size_t inSize) @@ -5661,30 +6386,40 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, */ params.compressionLevel = cctx->cdict->compressionLevel; } - DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); + DEBUGLOG(4, "ZSTD_CCtx_init_compressStream2 : transparent init stage"); if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */ { size_t const dictSize = prefixDict.dict ? prefixDict.dictSize : (cctx->cdict ? 
cctx->cdict->dictContentSize : 0); - ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1); + ZSTD_CParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1); params.cParams = ZSTD_getCParamsFromCCtxParams( ¶ms, cctx->pledgedSrcSizePlusOne-1, dictSize, mode); } - params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, ¶ms.cParams); + params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(params.postBlockSplitter, ¶ms.cParams); params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, ¶ms.cParams); params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, ¶ms.cParams); + params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences); + params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize); + params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel); #ifdef ZSTD_MULTITHREAD + /* If external matchfinder is enabled, make sure to fail before checking job size (for consistency) */ + RETURN_ERROR_IF( + ZSTD_hasExtSeqProd(¶ms) && params.nbWorkers >= 1, + parameter_combination_unsupported, + "External sequence producer isn't supported with nbWorkers >= 1" + ); + if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ } if (params.nbWorkers > 0) { -#if ZSTD_TRACE +# if ZSTD_TRACE cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0; -#endif +# endif /* mt context creation */ if (cctx->mtctx == NULL) { DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u", @@ -5720,7 +6455,7 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, /* for small input: avoid automatic flush on reaching end of block, since * it would require to add a 3-bytes null block to end frame */ - cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize); + cctx->inBuffTarget = cctx->blockSizeMax + (cctx->blockSizeMax == pledgedSrcSize); } else { cctx->inBuffTarget = 0; } @@ -5838,13 +6573,20 @@ size_t ZSTD_compressStream2_simpleArgs ( const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp) { - ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; - ZSTD_inBuffer input = { src, srcSize, *srcPos }; + ZSTD_outBuffer output; + ZSTD_inBuffer input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ - size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; + { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } } size_t ZSTD_compress2(ZSTD_CCtx* cctx, @@ -5867,6 +6609,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx, /* Reset to the original values. 
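As the surrounding code notes, ZSTD_compress2() is single-shot: if dst cannot hold the whole frame, it fails rather than resuming. A minimal usage sketch, sizing dst with ZSTD_compressBound() so that case cannot occur:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    int main(void)
    {
        char const src[] = "one-shot compression example";
        size_t const bound = ZSTD_compressBound(sizeof(src));
        void* const dst = malloc(bound);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        if (dst && cctx) {
            size_t const cSize = ZSTD_compress2(cctx, dst, bound, src, sizeof(src));
            if (!ZSTD_isError(cSize)) printf("compressed to %zu bytes\n", cSize);
        }
        ZSTD_freeCCtx(cctx);
        free(dst);
        return 0;
    }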
*/ cctx->requestedParams.inBufferMode = originalInBufferMode; cctx->requestedParams.outBufferMode = originalOutBufferMode; + FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); if (result != 0) { /* compression not completed, due to lack of output space */ assert(oPos == dstCapacity); @@ -5877,29 +6620,25 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx, } } -typedef struct { - U32 idx; /* Index in array of ZSTD_Sequence */ - U32 posInSequence; /* Position within sequence at idx */ - size_t posInSrc; /* Number of bytes given by sequences provided so far */ -} ZSTD_sequencePosition; - /* ZSTD_validateSequence() : - * @offCode : is presumed to follow format required by ZSTD_storeSeq() + * @offBase : must use the format required by ZSTD_storeSeq() * @returns a ZSTD error code if sequence is not valid */ static size_t -ZSTD_validateSequence(U32 offCode, U32 matchLength, - size_t posInSrc, U32 windowLog, size_t dictSize) +ZSTD_validateSequence(U32 offBase, U32 matchLength, U32 minMatch, + size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer) { - U32 const windowSize = 1 << windowLog; + U32 const windowSize = 1u << windowLog; /* posInSrc represents the amount of data the decoder would decode up to this point. * As long as the amount of data decoded is less than or equal to window size, offsets may be * larger than the total length of output decoded in order to reference the dict, even larger than * window size. After output surpasses windowSize, we're limited to windowSize offsets again. */ size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize; - RETURN_ERROR_IF(offCode > OFFSET_TO_OFFBASE(offsetBound), corruption_detected, "Offset too large!"); - RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small"); + size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4; + RETURN_ERROR_IF(offBase > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!"); + /* Validate maxNbSeq is large enough for the given matchLength and minMatch */ + RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch"); return 0; } @@ -5920,22 +6659,27 @@ static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 return offBase; } -/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of - * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. +/* This function scans through an array of ZSTD_Sequence, + * storing the sequences it reads, until it reaches a block delimiter. + * Note that the block delimiter includes the last literals of the block. + * @blockSize must be == sum(sequence_lengths). + * @returns @blockSize on success, and a ZSTD_error otherwise. 
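The offset bound computed in ZSTD_validateSequence() above can be restated on raw offsets (the real check compares offBase values after OFFSET_TO_OFFBASE() encoding). A minimal sketch:

    #include <stddef.h>

    /* while fewer than windowSize bytes have been produced, offsets may
     * additionally reach back into the dictionary */
    static int offsetValid_demo(size_t offset, size_t posInSrc,
                                unsigned windowLog, size_t dictSize)
    {
        size_t const windowSize = (size_t)1 << windowLog;
        size_t const bound = (posInSrc > windowSize) ? windowSize
                                                     : posInSrc + dictSize;
        return offset <= bound;
    }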
*/ static size_t -ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, - ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) +ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, + ZSTD_SequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, + ZSTD_ParamSwitch_e externalRepSearch) { U32 idx = seqPos->idx; + U32 const startIdx = idx; BYTE const* ip = (BYTE const*)(src); const BYTE* const iend = ip + blockSize; - repcodes_t updatedRepcodes; + Repcodes_t updatedRepcodes; U32 dictSize; - DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreExplicitBlockDelim (blockSize = %zu)", blockSize); + DEBUGLOG(5, "ZSTD_transferSequences_wBlockDelim (blockSize = %zu)", blockSize); if (cctx->cdict) { dictSize = (U32)cctx->cdict->dictContentSize; @@ -5944,27 +6688,60 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, } else { dictSize = 0; } - ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); + ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) { U32 const litLength = inSeqs[idx].litLength; - U32 const ll0 = (litLength == 0); U32 const matchLength = inSeqs[idx].matchLength; - U32 const offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + U32 offBase; + + if (externalRepSearch == ZSTD_ps_disable) { + offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset); + } else { + U32 const ll0 = (litLength == 0); + offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + } DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize), + FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, + seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, dictSize, + ZSTD_hasExtSeqProd(&cctx->appliedParams)), "Sequence validation failed"); } - RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, + RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); ip += matchLength + litLength; } - ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); + RETURN_ERROR_IF(idx == inSeqsSize, externalSequences_invalid, "Block delimiter not found."); + + /* If we skipped repcode search while parsing, we need to update repcodes now */ + assert(externalRepSearch != ZSTD_ps_auto); + assert(idx >= startIdx); + if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) { + U32* const rep = updatedRepcodes.rep; + U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */ + + if (lastSeqIdx >= startIdx + 2) { + rep[2] = inSeqs[lastSeqIdx - 2].offset; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else if (lastSeqIdx == startIdx + 1) { + rep[2] = rep[0]; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else { + assert(lastSeqIdx == startIdx); + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = inSeqs[lastSeqIdx].offset; + } + } + + ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); if (inSeqs[idx].litLength) { DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength); @@ -5972,38 +6749,43 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ip += inSeqs[idx].litLength; seqPos->posInSrc += inSeqs[idx].litLength; } - RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!"); + RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!"); seqPos->idx = idx+1; - return 0; + return blockSize; } -/* Returns the number of bytes to move the current read position back by. - * Only non-zero if we ended up splitting a sequence. - * Otherwise, it may return a ZSTD error if something went wrong. - * - * This function will attempt to scan through blockSize bytes +/* + * This function attempts to scan through @blockSize bytes in @src * represented by the sequences in @inSeqs, * storing any (partial) sequences. * - * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to - * avoid splitting a match, or to avoid splitting a match such that it would produce a match - * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. + * Occasionally, we may want to reduce the actual number of bytes consumed from @src + * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH. + * + * @returns the number of bytes consumed from @src, necessarily <= @blockSize. + * Otherwise, it may return a ZSTD error if something went wrong. 
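The repcode backfill above keeps the 3-slot history meaningful when repcode search was skipped: after the block, rep[] must hold the offsets of the last (up to) three sequences, newest first. A small sketch of the fully-populated case, with hypothetical offsets:

    #include <stdio.h>

    int main(void)
    {
        unsigned rep[3] = { 1, 4, 8 };               /* previous block's history */
        unsigned const offs[3] = { 100, 200, 300 };  /* last three raw offsets, oldest first */
        rep[2] = offs[0];
        rep[1] = offs[1];
        rep[0] = offs[2];                            /* newest offset lands in rep[0] */
        printf("rep = { %u, %u, %u }\n", rep[0], rep[1], rep[2]);
        return 0;
    }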
*/ static size_t -ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) +ZSTD_transferSequences_noDelim(ZSTD_CCtx* cctx, + ZSTD_SequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, + ZSTD_ParamSwitch_e externalRepSearch) { U32 idx = seqPos->idx; U32 startPosInSequence = seqPos->posInSequence; U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize; size_t dictSize; - BYTE const* ip = (BYTE const*)(src); - BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */ - repcodes_t updatedRepcodes; + const BYTE* const istart = (const BYTE*)(src); + const BYTE* ip = istart; + const BYTE* iend = istart + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */ + Repcodes_t updatedRepcodes; U32 bytesAdjustment = 0; U32 finalMatchSplit = 0; + /* TODO(embg) support fast parsing mode in noBlockDelim mode */ + (void)externalRepSearch; + if (cctx->cdict) { dictSize = cctx->cdict->dictContentSize; } else if (cctx->prefixDict.dict) { @@ -6011,9 +6793,9 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* } else { dictSize = 0; } - DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreNoBlockDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); + DEBUGLOG(5, "ZSTD_transferSequences_noDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); - ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); + ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { const ZSTD_Sequence currSeq = inSeqs[idx]; U32 litLength = currSeq.litLength; @@ -6033,7 +6815,6 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* /* Move to the next sequence */ endPosInSequence -= currSeq.litLength + currSeq.matchLength; startPosInSequence = 0; - idx++; } else { /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence does not reach the end of the match. So, we have to split the sequence */ @@ -6079,49 +6860,56 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize), + FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)), "Sequence validation failed"); } DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); - RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, + RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); ip += matchLength + litLength; + if (!finalMatchSplit) + idx++; /* Next Sequence */ } DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); seqPos->idx = idx; seqPos->posInSequence = endPosInSequence; - ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); + ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); iend -= bytesAdjustment; if (ip != iend) { /* Store any last literals */ - U32 lastLLSize = (U32)(iend - ip); + U32 const lastLLSize = (U32)(iend - ip); assert(ip <= iend); DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize); ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize); seqPos->posInSrc += lastLLSize; } - return bytesAdjustment; + return (size_t)(iend-istart); } -typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize); -static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) +/* @seqPos represents a position within @inSeqs, + * it is read and updated by this function, + * once the goal to produce a block of size @blockSize is reached. + * @return: nb of bytes consumed from @src, necessarily <= @blockSize. + */ +typedef size_t (*ZSTD_SequenceCopier_f)(ZSTD_CCtx* cctx, + ZSTD_SequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, + ZSTD_ParamSwitch_e externalRepSearch); + +static ZSTD_SequenceCopier_f ZSTD_selectSequenceCopier(ZSTD_SequenceFormat_e mode) { - ZSTD_sequenceCopier sequenceCopier = NULL; - assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode)); + assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, (int)mode)); if (mode == ZSTD_sf_explicitBlockDelimiters) { - return ZSTD_copySequencesToSeqStoreExplicitBlockDelim; - } else if (mode == ZSTD_sf_noBlockDelimiters) { - return ZSTD_copySequencesToSeqStoreNoBlockDelim; + return ZSTD_transferSequences_wBlockDelim; } - assert(sequenceCopier != NULL); - return sequenceCopier; + assert(mode == ZSTD_sf_noBlockDelimiters); + return ZSTD_transferSequences_noDelim; } /* Discover the size of next block by searching for the delimiter. @@ -6129,7 +6917,7 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) * otherwise it's an input error. 
* The block size retrieved will be later compared to ensure it remains within bounds */ static size_t -blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) +blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_SequencePosition seqPos) { int end = 0; size_t blockSize = 0; @@ -6141,41 +6929,38 @@ blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; if (end) { if (inSeqs[spos].matchLength != 0) - RETURN_ERROR(corruption_detected, "delimiter format error : both matchlength and offset must be == 0"); + RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0"); break; } spos++; } if (!end) - RETURN_ERROR(corruption_detected, "Reached end of sequences without finding a block delimiter"); + RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter"); return blockSize; } -/* More a "target" block size */ -static size_t blockSize_noDelimiter(size_t blockSize, size_t remaining) -{ - int const lastBlock = (remaining <= blockSize); - return lastBlock ? remaining : blockSize; -} - -static size_t determine_blockSize(ZSTD_sequenceFormat_e mode, +static size_t determine_blockSize(ZSTD_SequenceFormat_e mode, size_t blockSize, size_t remaining, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + ZSTD_SequencePosition seqPos) { DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining); - if (mode == ZSTD_sf_noBlockDelimiters) - return blockSize_noDelimiter(blockSize, remaining); + if (mode == ZSTD_sf_noBlockDelimiters) { + /* Note: more a "target" block size */ + return MIN(remaining, blockSize); + } + assert(mode == ZSTD_sf_explicitBlockDelimiters); { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters"); if (explicitBlockSize > blockSize) - RETURN_ERROR(corruption_detected, "sequences incorrectly define a too large block"); + RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block"); if (explicitBlockSize > remaining) - RETURN_ERROR(srcSize_wrong, "sequences define a frame longer than source"); + RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source"); return explicitBlockSize; } } -/* Compress, block-by-block, all of the sequences given. +/* Compress all provided sequences, block-by-block. * * Returns the cumulative size of all compressed blocks (including their headers), * otherwise a ZSTD error. 
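blockSize_explicitDelimiter() below sums sequence lengths until it hits the delimiter (a sequence with matchLength == 0 and offset == 0, whose litLength carries the block's trailing literals). A simplified sketch of that scan:

    #include <stddef.h>

    typedef struct { unsigned offset, litLength, matchLength; } Seq_demo;

    static size_t blockSizeFromSeqs_demo(Seq_demo const* seqs, size_t nbSeqs)
    {
        size_t total = 0;
        size_t i;
        for (i = 0; i < nbSeqs; i++) {
            total += seqs[i].litLength + seqs[i].matchLength;
            if (seqs[i].offset == 0 && seqs[i].matchLength == 0)
                return total;          /* delimiter found: block size is complete */
        }
        return (size_t)-1;             /* no delimiter: input error */
    }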
@@ -6188,11 +6973,11 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, { size_t cSize = 0; size_t remaining = srcSize; - ZSTD_sequencePosition seqPos = {0, 0, 0}; + ZSTD_SequencePosition seqPos = {0, 0, 0}; - BYTE const* ip = (BYTE const*)src; + const BYTE* ip = (BYTE const*)src; BYTE* op = (BYTE*)dst; - ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); + ZSTD_SequenceCopier_f const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize); /* Special case: empty frame */ @@ -6208,19 +6993,19 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, while (remaining) { size_t compressedSeqsSize; size_t cBlockSize; - size_t additionalByteAdjustment; size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, - cctx->blockSize, remaining, + cctx->blockSizeMax, remaining, inSeqs, inSeqsSize, seqPos); U32 const lastBlock = (blockSize == remaining); FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size"); assert(blockSize <= remaining); ZSTD_resetSeqStore(&cctx->seqStore); - DEBUGLOG(5, "Working on new block. Blocksize: %zu (total:%zu)", blockSize, (ip - (const BYTE*)src) + blockSize); - additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize); - FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy"); - blockSize -= additionalByteAdjustment; + blockSize = sequenceCopier(cctx, + &seqPos, inSeqs, inSeqsSize, + ip, blockSize, + cctx->appliedParams.searchForExternalRepcodes); + FORWARD_IF_ERROR(blockSize, "Bad sequence copy"); /* If blocks are too small, emit as a nocompress block */ /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding @@ -6228,7 +7013,7 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); - DEBUGLOG(5, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); + DEBUGLOG(5, "Block too small (%zu): data remains uncompressed: cSize=%zu", blockSize, cBlockSize); cSize += cBlockSize; ip += blockSize; op += cBlockSize; @@ -6243,18 +7028,18 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, &cctx->appliedParams, op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, blockSize, - cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, + cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */, cctx->bmi2); FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); if (!cctx->isFirstBlock && ZSTD_maybeRLE(&cctx->seqStore) && - ZSTD_isRLE((BYTE const*)src, srcSize)) { - /* We don't want to emit our first block as a RLE even if it qualifies because - * doing so will cause the decoder (cli only) to throw a "should consume all input error." - * This is only an issue for zstd <= v1.4.3 - */ + ZSTD_isRLE(ip, blockSize)) { + /* Note: don't emit the first block as RLE even if it qualifies because + * doing so will cause the decoder (cli <= v1.4.3 only) to throw an (invalid) error + * "should consume all input error." 
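The RLE special case above relies on a whole-block byte-equality test, in the spirit of ZSTD_isRLE(). A sketch of that predicate (the patch then deliberately skips RLE for the first block, for compatibility with decoders <= v1.4.3):

    #include <stddef.h>

    /* a block qualifies for 1-byte RLE encoding when every byte equals the first */
    static int isRLE_demo(unsigned char const* src, size_t size)
    {
        size_t i;
        for (i = 1; i < size; i++)
            if (src[i] != src[0]) return 0;
        return size > 0;
    }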
+ */ compressedSeqsSize = 1; } @@ -6306,30 +7091,36 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* cctx, { BYTE* op = (BYTE*)dst; size_t cSize = 0; - size_t compressedBlocksSize = 0; - size_t frameHeaderSize = 0; /* Transparent initialization stage, same as compressStream2() */ - DEBUGLOG(4, "ZSTD_compressSequences (dstCapacity=%zu)", dstCapacity); + DEBUGLOG(4, "ZSTD_compressSequences (nbSeqs=%zu,dstCapacity=%zu)", inSeqsSize, dstCapacity); assert(cctx != NULL); FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed"); + /* Begin writing output, starting with frame header */ - frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID); - op += frameHeaderSize; - dstCapacity -= frameHeaderSize; - cSize += frameHeaderSize; + { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, + &cctx->appliedParams, srcSize, cctx->dictID); + op += frameHeaderSize; + assert(frameHeaderSize <= dstCapacity); + dstCapacity -= frameHeaderSize; + cSize += frameHeaderSize; + } if (cctx->appliedParams.fParams.checksumFlag && srcSize) { XXH64_update(&cctx->xxhState, src, srcSize); } - /* cSize includes block header size and compressed sequences size */ - compressedBlocksSize = ZSTD_compressSequences_internal(cctx, + + /* Now generate compressed blocks */ + { size_t const cBlocksSize = ZSTD_compressSequences_internal(cctx, op, dstCapacity, inSeqs, inSeqsSize, src, srcSize); - FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!"); - cSize += compressedBlocksSize; - dstCapacity -= compressedBlocksSize; + FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); + cSize += cBlocksSize; + assert(cBlocksSize <= dstCapacity); + dstCapacity -= cBlocksSize; + } + /* Complete with frame checksum, if needed */ if (cctx->appliedParams.fParams.checksumFlag) { U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); @@ -6342,6 +7133,1033 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* cctx, return cSize; } + +#if defined(ZSTD_ARCH_X86_AVX2) + +#include <immintrin.h> /* AVX2 intrinsics */ + +/* + * Convert 2 sequences per iteration, using AVX2 intrinsics: + * - offset -> offBase = offset + 2 + * - litLength -> (U16) litLength + * - matchLength -> (U16)(matchLength - 3) + * - rep is ignored + * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]). + * + * At the end, instead of extracting two __m128i, + * we use _mm256_permute4x64_epi64(..., 0xE8) to move lane2 into lane1, + * then store the lower 16 bytes in one go. + * + * @returns 0 on success, with no long length detected + * @returns > 0 if there is one long length (> 65535), + * indicating the position, and type. 
+ */ +size_t convertSequences_noRepcodes( + SeqDef* dstSeqs, + const ZSTD_Sequence* inSeqs, + size_t nbSequences) +{ + /* + * addition: + * For each 128-bit half: (offset+2, litLength+0, matchLength-3, rep+0) + */ + const __m256i addition = _mm256_setr_epi32( + ZSTD_REP_NUM, 0, -MINMATCH, 0, /* for sequence i */ + ZSTD_REP_NUM, 0, -MINMATCH, 0 /* for sequence i+1 */ + ); + + /* limit: check if there is a long length */ + const __m256i limit = _mm256_set1_epi32(65535); + + /* + * shuffle mask for byte-level rearrangement in each 128-bit half: + * + * Input layout (after addition) per 128-bit half: + * [ offset+2 (4 bytes) | litLength (4 bytes) | matchLength (4 bytes) | rep (4 bytes) ] + * We only need: + * offBase (4 bytes) = offset+2 + * litLength (2 bytes) = low 2 bytes of litLength + * mlBase (2 bytes) = low 2 bytes of (matchLength) + * => Bytes [0..3, 4..5, 8..9], zero the rest. + */ + const __m256i mask = _mm256_setr_epi8( + /* For the lower 128 bits => sequence i */ + 0, 1, 2, 3, /* offset+2 */ + 4, 5, /* litLength (16 bits) */ + 8, 9, /* matchLength (16 bits) */ + (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, + (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, + + /* For the upper 128 bits => sequence i+1 */ + 16,17,18,19, /* offset+2 */ + 20,21, /* litLength */ + 24,25, /* matchLength */ + (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, + (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80 + ); + + /* + * Next, we'll use _mm256_permute4x64_epi64(vshf, 0xE8). + * Explanation of 0xE8 = 11101000b => [lane0, lane2, lane2, lane3]. + * So the lower 128 bits become [lane0, lane2] => combining seq0 and seq1. + */ +#define PERM_LANE_0X_E8 0xE8 /* [0,2,2,3] in lane indices */ + + size_t longLen = 0, i = 0; + + /* AVX permutation depends on the specific definition of target structures */ + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + + /* Process 2 sequences per loop iteration */ + for (; i + 1 < nbSequences; i += 2) { + /* Load 2 ZSTD_Sequence (32 bytes) */ + __m256i vin = _mm256_loadu_si256((const __m256i*)(const void*)&inSeqs[i]); + + /* Add {2, 0, -3, 0} in each 128-bit half */ + __m256i vadd = _mm256_add_epi32(vin, addition); + + /* Check for long length */ + __m256i ll_cmp = _mm256_cmpgt_epi32(vadd, limit); /* 0xFFFFFFFF for element > 65535 */ + int ll_res = _mm256_movemask_epi8(ll_cmp); + + /* Shuffle bytes so each half gives us the 8 bytes we need */ + __m256i vshf = _mm256_shuffle_epi8(vadd, mask); + /* + * Now: + * Lane0 = seq0's 8 bytes + * Lane1 = 0 + * Lane2 = seq1's 8 bytes + * Lane3 = 0 + */ + + /* Permute 64-bit lanes => move Lane2 down into Lane1. */ + __m256i vperm = _mm256_permute4x64_epi64(vshf, PERM_LANE_0X_E8); + /* + * Now the lower 16 bytes (Lane0+Lane1) = [seq0, seq1]. + * The upper 16 bytes are [Lane2, Lane3] = [seq1, 0], but we won't use them. 
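+         * (Alternatively, the two lanes could be stored with two separate
+         *  8-byte stores; the permute simply lets a single 16-byte store
+         *  cover both sequences.)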
+         */
+
+        /* Store only the lower 16 bytes => 2 SeqDef (8 bytes each) */
+        _mm_storeu_si128((__m128i *)(void*)&dstSeqs[i], _mm256_castsi256_si128(vperm));
+        /*
+         * This writes out 16 bytes total:
+         * - offset 0..7  => seq0 (offBase, litLength, mlBase)
+         * - offset 8..15 => seq1 (offBase, litLength, mlBase)
+         */
+
+        /* check (unlikely) long lengths > 65535
+         * indices for lengths correspond to bits [4..7], [8..11], [20..23], [24..27]
+         * => combined mask = 0x0FF00FF0
+         */
+        if (UNLIKELY((ll_res & 0x0FF00FF0) != 0)) {
+            /* long length detected: let's figure out which one */
+            if (inSeqs[i].matchLength > 65535+MINMATCH) {
+                assert(longLen == 0);
+                longLen = i + 1;
+            }
+            if (inSeqs[i].litLength > 65535) {
+                assert(longLen == 0);
+                longLen = i + nbSequences + 1;
+            }
+            if (inSeqs[i+1].matchLength > 65535+MINMATCH) {
+                assert(longLen == 0);
+                longLen = i + 1 + 1;
+            }
+            if (inSeqs[i+1].litLength > 65535) {
+                assert(longLen == 0);
+                longLen = i + 1 + nbSequences + 1;
+            }
+        }
+    }
+
+    /* Handle leftover if @nbSequences is odd */
+    if (i < nbSequences) {
+        /* process last sequence */
+        assert(i == nbSequences - 1);
+        dstSeqs[i].offBase = OFFSET_TO_OFFBASE(inSeqs[i].offset);
+        dstSeqs[i].litLength = (U16)inSeqs[i].litLength;
+        dstSeqs[i].mlBase = (U16)(inSeqs[i].matchLength - MINMATCH);
+        /* check (unlikely) long lengths > 65535 */
+        if (UNLIKELY(inSeqs[i].matchLength > 65535+MINMATCH)) {
+            assert(longLen == 0);
+            longLen = i + 1;
+        }
+        if (UNLIKELY(inSeqs[i].litLength > 65535)) {
+            assert(longLen == 0);
+            longLen = i + nbSequences + 1;
+        }
+    }
+
+    return longLen;
+}
+
+#elif defined (ZSTD_ARCH_RISCV_RVV)
+#include <riscv_vector.h>
+/*
+ * Convert `vl` sequences per iteration, using RVV intrinsics:
+ * - offset -> offBase = offset + 2
+ * - litLength -> (U16) litLength
+ * - matchLength -> (U16)(matchLength - 3)
+ * - rep is ignored
+ * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]).
+ *
+ * @returns 0 on success, with no long length detected
+ * @returns > 0 if there is one long length (> 65535),
+ *           indicating the position, and type.
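+ *
+ * Note: the loop is strip-mined with vsetvl, processing
+ * __riscv_vsetvl_e32m2(nbSequences - i) sequences per iteration, and the
+ * segment load (vlseg4e32) de-interleaves the 4 U32 fields of ZSTD_Sequence
+ * into separate offset/litLength/matchLength vectors, so no shuffle mask is needed.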
+ */ +size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence* inSeqs, size_t nbSequences) { + size_t longLen = 0; + size_t vl = 0; + typedef uint32_t __attribute__((may_alias)) aliased_u32; + /* RVV depends on the specific definition of target structures */ + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + + for (size_t i = 0; i < nbSequences; i += vl) { + + vl = __riscv_vsetvl_e32m2(nbSequences-i); + { + // Loading structure member variables + vuint32m2x4_t v_tuple = __riscv_vlseg4e32_v_u32m2x4( + (const aliased_u32*)((const void*)&inSeqs[i]), + vl + ); + vuint32m2_t v_offset = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 0); + vuint32m2_t v_lit = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 1); + vuint32m2_t v_match = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 2); + // offset + ZSTD_REP_NUM + vuint32m2_t v_offBase = __riscv_vadd_vx_u32m2(v_offset, ZSTD_REP_NUM, vl); + // Check for integer overflow + // Cast to a 16-bit variable + vbool16_t lit_overflow = __riscv_vmsgtu_vx_u32m2_b16(v_lit, 65535, vl); + vuint16m1_t v_lit_clamped = __riscv_vncvt_x_x_w_u16m1(v_lit, vl); + + vbool16_t ml_overflow = __riscv_vmsgtu_vx_u32m2_b16(v_match, 65535+MINMATCH, vl); + vuint16m1_t v_ml_clamped = __riscv_vncvt_x_x_w_u16m1(__riscv_vsub_vx_u32m2(v_match, MINMATCH, vl), vl); + + // Pack two 16-bit fields into a 32-bit value (little-endian) + // The lower 16 bits contain litLength, and the upper 16 bits contain mlBase + vuint32m2_t v_lit_ml_combined = __riscv_vsll_vx_u32m2( + __riscv_vwcvtu_x_x_v_u32m2(v_ml_clamped, vl), // Convert matchLength to 32-bit + 16, + vl + ); + v_lit_ml_combined = __riscv_vor_vv_u32m2( + v_lit_ml_combined, + __riscv_vwcvtu_x_x_v_u32m2(v_lit_clamped, vl), + vl + ); + { + // Create a vector of SeqDef structures + // Store the offBase, litLength, and mlBase in a vector of SeqDef + vuint32m2x2_t store_data = __riscv_vcreate_v_u32m2x2( + v_offBase, + v_lit_ml_combined + ); + __riscv_vsseg2e32_v_u32m2x2( + (aliased_u32*)((void*)&dstSeqs[i]), + store_data, + vl + ); + } + { + // Find the first index where an overflow occurs + int first_ml = __riscv_vfirst_m_b16(ml_overflow, vl); + int first_lit = __riscv_vfirst_m_b16(lit_overflow, vl); + + if (UNLIKELY(first_ml != -1)) { + assert(longLen == 0); + longLen = i + first_ml + 1; + } + if (UNLIKELY(first_lit != -1)) { + assert(longLen == 0); + longLen = i + first_lit + 1 + nbSequences; + } + } + } + } + return longLen; +} + +/* the vector implementation could also be ported to SSSE3, + * but since this implementation is targeting modern systems (>= Sapphire Rapid), + * it's not useful to develop and maintain code for older pre-AVX2 platforms */ + +#elif defined(ZSTD_ARCH_ARM_SVE2) + +/* + * Checks if any active element in a signed 8-bit integer vector is greater + * than zero. + * + * @param g Governing predicate selecting active lanes. + * @param a Input vector of signed 8-bit integers. + * + * @return True if any active element in `a` is > 0, false otherwise. 
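+ *
+ * Used below to detect, with a single branch per iteration, whether any of
+ * the packed overflow bytes gathered from the 8 input vectors signals a
+ * length outside the 16-bit range.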
+ */ +FORCE_INLINE_TEMPLATE int cmpgtz_any_s8(svbool_t g, svint8_t a) +{ + svbool_t ptest = svcmpgt_n_s8(g, a, 0); + return svptest_any(ptest, ptest); +} + +size_t convertSequences_noRepcodes( + SeqDef* dstSeqs, + const ZSTD_Sequence* inSeqs, + size_t nbSequences) +{ + /* Process the input with `8 * VL / element` lanes. */ + const size_t lanes = 8 * svcntb() / sizeof(ZSTD_Sequence); + size_t longLen = 0; + size_t n = 0; + + /* SVE permutation depends on the specific definition of target structures. */ + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + + if (nbSequences >= lanes) { + const svbool_t ptrue = svptrue_b8(); + /* 16-bit of {ZSTD_REP_NUM, 0, -MINMATCH, 0} extended to 32-bit lanes. */ + const svuint32_t vaddition = svreinterpret_u32( + svunpklo_s32(svreinterpret_s16(svdup_n_u64(ZSTD_REP_NUM | (((U64)(U16)-MINMATCH) << 32))))); + /* For permutation of 16-bit units: 0, 1, 2, 4, 8, 9, 10, 12, ... */ + const svuint16_t vmask = svreinterpret_u16( + svindex_u64(0x0004000200010000, 0x0008000800080008)); + /* Upper bytes of `litLength` and `matchLength` will be packed into the + * middle of overflow check vector. */ + const svbool_t pmid = svcmpne_n_u8( + ptrue, svreinterpret_u8(svdup_n_u64(0x0000FFFFFFFF0000)), 0); + + do { + /* Load `lanes` number of `ZSTD_Sequence` into 8 vectors. */ + const svuint32_t vin0 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 0); + const svuint32_t vin1 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 1); + const svuint32_t vin2 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 2); + const svuint32_t vin3 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 3); + const svuint32_t vin4 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 4); + const svuint32_t vin5 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 5); + const svuint32_t vin6 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 6); + const svuint32_t vin7 = svld1_vnum_u32(ptrue, &inSeqs[n].offset, 7); + + /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} to each structures. */ + const svuint16x2_t vadd01 = svcreate2_u16( + svreinterpret_u16(svadd_u32_x(ptrue, vin0, vaddition)), + svreinterpret_u16(svadd_u32_x(ptrue, vin1, vaddition))); + const svuint16x2_t vadd23 = svcreate2_u16( + svreinterpret_u16(svadd_u32_x(ptrue, vin2, vaddition)), + svreinterpret_u16(svadd_u32_x(ptrue, vin3, vaddition))); + const svuint16x2_t vadd45 = svcreate2_u16( + svreinterpret_u16(svadd_u32_x(ptrue, vin4, vaddition)), + svreinterpret_u16(svadd_u32_x(ptrue, vin5, vaddition))); + const svuint16x2_t vadd67 = svcreate2_u16( + svreinterpret_u16(svadd_u32_x(ptrue, vin6, vaddition)), + svreinterpret_u16(svadd_u32_x(ptrue, vin7, vaddition))); + + /* Shuffle and pack bytes so each vector contains SeqDef structures. */ + const svuint16_t vout01 = svtbl2_u16(vadd01, vmask); + const svuint16_t vout23 = svtbl2_u16(vadd23, vmask); + const svuint16_t vout45 = svtbl2_u16(vadd45, vmask); + const svuint16_t vout67 = svtbl2_u16(vadd67, vmask); + + /* Pack the upper 16-bits of 32-bit lanes for overflow check. 
*/ + const svuint16_t voverflow01 = svuzp2_u16(svget2_u16(vadd01, 0), + svget2_u16(vadd01, 1)); + const svuint16_t voverflow23 = svuzp2_u16(svget2_u16(vadd23, 0), + svget2_u16(vadd23, 1)); + const svuint16_t voverflow45 = svuzp2_u16(svget2_u16(vadd45, 0), + svget2_u16(vadd45, 1)); + const svuint16_t voverflow67 = svuzp2_u16(svget2_u16(vadd67, 0), + svget2_u16(vadd67, 1)); + + /* We don't need the whole 16 bits of the overflow part. Only 1 bit + * is needed, so we pack tightly and merge multiple vectors to be + * able to use a single comparison to handle the overflow case. + * However, we also need to handle the possible negative values of + * matchLength parts, so we use signed comparison later. */ + const svint8_t voverflow = + svmax_s8_x(pmid, + svtrn1_s8(svreinterpret_s8(voverflow01), + svreinterpret_s8(voverflow23)), + svtrn1_s8(svreinterpret_s8(voverflow45), + svreinterpret_s8(voverflow67))); + + /* Store `lanes` number of `SeqDef` structures from 4 vectors. */ + svst1_vnum_u32(ptrue, &dstSeqs[n].offBase, 0, svreinterpret_u32(vout01)); + svst1_vnum_u32(ptrue, &dstSeqs[n].offBase, 1, svreinterpret_u32(vout23)); + svst1_vnum_u32(ptrue, &dstSeqs[n].offBase, 2, svreinterpret_u32(vout45)); + svst1_vnum_u32(ptrue, &dstSeqs[n].offBase, 3, svreinterpret_u32(vout67)); + + /* Check if any enabled lanes of the overflow vector is larger than + * zero, only one such may happen. */ + if (UNLIKELY(cmpgtz_any_s8(pmid, voverflow))) { + /* Scalar search for long match is needed because we merged + * multiple overflow bytes with `max`. */ + size_t i; + for (i = n; i < n + lanes; i++) { + if (inSeqs[i].matchLength > 65535 + MINMATCH) { + assert(longLen == 0); + longLen = i + 1; + } + if (inSeqs[i].litLength > 65535) { + assert(longLen == 0); + longLen = i + nbSequences + 1; + } + } + } + + n += lanes; + } while(n <= nbSequences - lanes); + } + + /* Handle remaining elements. */ + for (; n < nbSequences; n++) { + dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset); + dstSeqs[n].litLength = (U16)inSeqs[n].litLength; + dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH); + /* Check for long length > 65535. */ + if (UNLIKELY(inSeqs[n].matchLength > 65535 + MINMATCH)) { + assert(longLen == 0); + longLen = n + 1; + } + if (UNLIKELY(inSeqs[n].litLength > 65535)) { + assert(longLen == 0); + longLen = n + nbSequences + 1; + } + } + return longLen; +} + +#elif defined(ZSTD_ARCH_ARM_NEON) && (defined(__aarch64__) || defined(_M_ARM64)) + +size_t convertSequences_noRepcodes( + SeqDef* dstSeqs, + const ZSTD_Sequence* inSeqs, + size_t nbSequences) +{ + size_t longLen = 0; + size_t n = 0; + + /* Neon permutation depends on the specific definition of target structures. 
*/ + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); + ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); + + if (nbSequences > 3) { + static const ZSTD_ALIGNED(16) U32 constAddition[4] = { + ZSTD_REP_NUM, 0, -MINMATCH, 0 + }; + static const ZSTD_ALIGNED(16) U8 constMask[16] = { + 0, 1, 2, 3, 4, 5, 8, 9, 16, 17, 18, 19, 20, 21, 24, 25 + }; + static const ZSTD_ALIGNED(16) U16 constCounter[8] = { + 1, 1, 1, 1, 2, 2, 2, 2 + }; + + const uint32x4_t vaddition = vld1q_u32(constAddition); + const uint8x16_t vmask = vld1q_u8(constMask); + uint16x8_t vcounter = vld1q_u16(constCounter); + uint16x8_t vindex01 = vdupq_n_u16(0); + uint16x8_t vindex23 = vdupq_n_u16(0); + + do { + /* Load 4 ZSTD_Sequence (64 bytes). */ + const uint32x4_t vin0 = vld1q_u32(&inSeqs[n + 0].offset); + const uint32x4_t vin1 = vld1q_u32(&inSeqs[n + 1].offset); + const uint32x4_t vin2 = vld1q_u32(&inSeqs[n + 2].offset); + const uint32x4_t vin3 = vld1q_u32(&inSeqs[n + 3].offset); + + /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} to each vector. */ + const uint8x16x2_t vadd01 = { { + vreinterpretq_u8_u32(vaddq_u32(vin0, vaddition)), + vreinterpretq_u8_u32(vaddq_u32(vin1, vaddition)), + } }; + const uint8x16x2_t vadd23 = { { + vreinterpretq_u8_u32(vaddq_u32(vin2, vaddition)), + vreinterpretq_u8_u32(vaddq_u32(vin3, vaddition)), + } }; + + /* Shuffle and pack bytes so each vector contains 2 SeqDef structures. */ + const uint8x16_t vout01 = vqtbl2q_u8(vadd01, vmask); + const uint8x16_t vout23 = vqtbl2q_u8(vadd23, vmask); + + /* Pack the upper 16-bits of 32-bit lanes for overflow check. */ + uint16x8_t voverflow01 = vuzp2q_u16(vreinterpretq_u16_u8(vadd01.val[0]), + vreinterpretq_u16_u8(vadd01.val[1])); + uint16x8_t voverflow23 = vuzp2q_u16(vreinterpretq_u16_u8(vadd23.val[0]), + vreinterpretq_u16_u8(vadd23.val[1])); + + /* Store 4 SeqDef structures. */ + vst1q_u32(&dstSeqs[n + 0].offBase, vreinterpretq_u32_u8(vout01)); + vst1q_u32(&dstSeqs[n + 2].offBase, vreinterpretq_u32_u8(vout23)); + + /* Create masks in case of overflow. */ + voverflow01 = vcgtzq_s16(vreinterpretq_s16_u16(voverflow01)); + voverflow23 = vcgtzq_s16(vreinterpretq_s16_u16(voverflow23)); + + /* Update overflow indices. */ + vindex01 = vbslq_u16(voverflow01, vcounter, vindex01); + vindex23 = vbslq_u16(voverflow23, vcounter, vindex23); + + /* Update counter for overflow check. */ + vcounter = vaddq_u16(vcounter, vdupq_n_u16(4)); + + n += 4; + } while(n < nbSequences - 3); + + /* Fixup indices in the second vector, we saved an additional counter + in the loop to update the second overflow index, we need to add 2 + here when the indices are not 0. */ + { uint16x8_t nonzero = vtstq_u16(vindex23, vindex23); + vindex23 = vsubq_u16(vindex23, nonzero); + vindex23 = vsubq_u16(vindex23, nonzero); + } + + /* Merge indices in the vectors, maximums are needed. */ + vindex01 = vmaxq_u16(vindex01, vindex23); + vindex01 = vmaxq_u16(vindex01, vextq_u16(vindex01, vindex01, 4)); + + /* Compute `longLen`, maximums of matchLength and litLength + with a preference on litLength. 
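+       (a long litLength is reported as its index + nbSequences + 1,
+       consistent with the scalar tail below, which is how the caller
+       tells the two length types apart)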
+         */
+        {   U64 maxLitMatchIndices = vgetq_lane_u64(vreinterpretq_u64_u16(vindex01), 0);
+            size_t maxLitIndex = (maxLitMatchIndices >> 16) & 0xFFFF;
+            size_t maxMatchIndex = (maxLitMatchIndices >> 32) & 0xFFFF;
+            longLen = maxLitIndex > maxMatchIndex ? maxLitIndex + nbSequences
+                                                  : maxMatchIndex;
+        }
+    }
+
+    /* Handle remaining elements. */
+    for (; n < nbSequences; n++) {
+        dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset);
+        dstSeqs[n].litLength = (U16)inSeqs[n].litLength;
+        dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH);
+        /* Check for long length > 65535. */
+        if (UNLIKELY(inSeqs[n].matchLength > 65535 + MINMATCH)) {
+            assert(longLen == 0);
+            longLen = n + 1;
+        }
+        if (UNLIKELY(inSeqs[n].litLength > 65535)) {
+            assert(longLen == 0);
+            longLen = n + nbSequences + 1;
+        }
+    }
+    return longLen;
+}
+
+#else /* No vectorization. */
+
+size_t convertSequences_noRepcodes(
+    SeqDef* dstSeqs,
+    const ZSTD_Sequence* inSeqs,
+    size_t nbSequences)
+{
+    size_t longLen = 0;
+    size_t n;
+    for (n=0; n<nbSequences; n++) {
+        dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset);
+        dstSeqs[n].litLength = (U16)inSeqs[n].litLength;
+        dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH);
+        /* Check for long length > 65535. */
+        if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) {
+            assert(longLen == 0);
+            longLen = n + 1;
+        }
+        if (UNLIKELY(inSeqs[n].litLength > 65535)) {
+            assert(longLen == 0);
+            longLen = n + nbSequences + 1;
+        }
+    }
+    return longLen;
+}
+
+#endif
+
+/*
+ * Precondition: Sequences must end on an explicit Block Delimiter
+ * @return: 0 on success, or an error code.
+ * Note: Sequence validation functionality has been disabled (removed).
+ * This is helpful to generate a lean main pipeline, improving performance.
+ * It may be re-inserted later.
+ */
+size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx,
+            const ZSTD_Sequence* const inSeqs, size_t nbSequences,
+            int repcodeResolution)
+{
+    Repcodes_t updatedRepcodes;
+    size_t seqNb = 0;
+
+    DEBUGLOG(5, "ZSTD_convertBlockSequences (nbSequences = %zu)", nbSequences);
+
+    RETURN_ERROR_IF(nbSequences >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
+                    "Not enough memory allocated.
Try adjusting ZSTD_c_minMatch."); + + ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + + /* check end condition */ + assert(nbSequences >= 1); + assert(inSeqs[nbSequences-1].matchLength == 0); + assert(inSeqs[nbSequences-1].offset == 0); + + /* Convert Sequences from public format to internal format */ + if (!repcodeResolution) { + size_t const longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences-1); + cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences-1; + if (longl) { + DEBUGLOG(5, "long length"); + assert(cctx->seqStore.longLengthType == ZSTD_llt_none); + if (longl <= nbSequences-1) { + DEBUGLOG(5, "long match length detected at pos %zu", longl-1); + cctx->seqStore.longLengthType = ZSTD_llt_matchLength; + cctx->seqStore.longLengthPos = (U32)(longl-1); + } else { + DEBUGLOG(5, "long literals length detected at pos %zu", longl-nbSequences); + assert(longl <= 2* (nbSequences-1)); + cctx->seqStore.longLengthType = ZSTD_llt_literalLength; + cctx->seqStore.longLengthPos = (U32)(longl-(nbSequences-1)-1); + } + } + } else { + for (seqNb = 0; seqNb < nbSequences - 1 ; seqNb++) { + U32 const litLength = inSeqs[seqNb].litLength; + U32 const matchLength = inSeqs[seqNb].matchLength; + U32 const ll0 = (litLength == 0); + U32 const offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0); + + DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); + ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + } + } + + /* If we skipped repcode search while parsing, we need to update repcodes now */ + if (!repcodeResolution && nbSequences > 1) { + U32* const rep = updatedRepcodes.rep; + + if (nbSequences >= 4) { + U32 lastSeqIdx = (U32)nbSequences - 2; /* index of last full sequence */ + rep[2] = inSeqs[lastSeqIdx - 2].offset; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else if (nbSequences == 3) { + rep[2] = rep[0]; + rep[1] = inSeqs[0].offset; + rep[0] = inSeqs[1].offset; + } else { + assert(nbSequences == 2); + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = inSeqs[0].offset; + } + } + + ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); + + return 0; +} + +#if defined(ZSTD_ARCH_X86_AVX2) + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) +{ + size_t i; + __m256i const zeroVec = _mm256_setzero_si256(); + __m256i sumVec = zeroVec; /* accumulates match+lit in 32-bit lanes */ + ZSTD_ALIGNED(32) U32 tmp[8]; /* temporary buffer for reduction */ + size_t mSum = 0, lSum = 0; + ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); + + /* Process 2 structs (32 bytes) at a time */ + for (i = 0; i + 2 <= nbSeqs; i += 2) { + /* Load two consecutive ZSTD_Sequence (8×4 = 32 bytes) */ + __m256i data = _mm256_loadu_si256((const __m256i*)(const void*)&seqs[i]); + /* check end of block signal */ + __m256i cmp = _mm256_cmpeq_epi32(data, zeroVec); + int cmp_res = _mm256_movemask_epi8(cmp); + /* indices for match lengths correspond to bits [8..11], [24..27] + * => combined mask = 0x0F000F00 */ + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); + if (cmp_res & 0x0F000F00) break; + /* Accumulate in sumVec */ + sumVec = _mm256_add_epi32(sumVec, data); + } + + /* Horizontal reduction */ + _mm256_store_si256((__m256i*)tmp, sumVec); + lSum = tmp[1] + tmp[5]; + mSum = tmp[2] + tmp[6]; + + /* Handle the 
leftover */ + for (; i < nbSeqs; i++) { + lSum += seqs[i].litLength; + mSum += seqs[i].matchLength; + if (seqs[i].matchLength == 0) break; /* end of block */ + } + + if (i==nbSeqs) { + /* reaching end of sequences: end of block signal was not present */ + BlockSummary bs; + bs.nbSequences = ERROR(externalSequences_invalid); + return bs; + } + { BlockSummary bs; + bs.nbSequences = i+1; + bs.blockSize = lSum + mSum; + bs.litSize = lSum; + return bs; + } +} + +#elif defined (ZSTD_ARCH_RISCV_RVV) + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) +{ + size_t totalMatchSize = 0; + size_t litSize = 0; + size_t i = 0; + int found_terminator = 0; + size_t vl_max = __riscv_vsetvlmax_e32m1(); + typedef uint32_t __attribute__((may_alias)) aliased_u32; + vuint32m1_t v_lit_sum = __riscv_vmv_v_x_u32m1(0, vl_max); + vuint32m1_t v_match_sum = __riscv_vmv_v_x_u32m1(0, vl_max); + + for (; i < nbSeqs; ) { + size_t vl = __riscv_vsetvl_e32m2(nbSeqs - i); + + vuint32m2x4_t v_tuple = __riscv_vlseg4e32_v_u32m2x4( + (const aliased_u32*)((const void*)&seqs[i]), + vl + ); + vuint32m2_t v_lit = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 1); + vuint32m2_t v_match = __riscv_vget_v_u32m2x4_u32m2(v_tuple, 2); + + // Check if any element has a matchLength of 0 + vbool16_t mask = __riscv_vmseq_vx_u32m2_b16(v_match, 0, vl); + int first_zero = __riscv_vfirst_m_b16(mask, vl); + + if (first_zero >= 0) { + // Find the first zero byte and set the effective length to that index + 1 to + // recompute the cumulative vector length of literals and matches + vl = first_zero + 1; + + // recompute the cumulative vector length of literals and matches + v_lit_sum = __riscv_vredsum_vs_u32m2_u32m1(__riscv_vslidedown_vx_u32m2(v_lit, 0, vl), v_lit_sum, vl); + v_match_sum = __riscv_vredsum_vs_u32m2_u32m1(__riscv_vslidedown_vx_u32m2(v_match, 0, vl), v_match_sum, vl); + + i += vl; + found_terminator = 1; + assert(seqs[i - 1].offset == 0); + break; + } else { + + v_lit_sum = __riscv_vredsum_vs_u32m2_u32m1(v_lit, v_lit_sum, vl); + v_match_sum = __riscv_vredsum_vs_u32m2_u32m1(v_match, v_match_sum, vl); + i += vl; + } + } + litSize = __riscv_vmv_x_s_u32m1_u32(v_lit_sum); + totalMatchSize = __riscv_vmv_x_s_u32m1_u32(v_match_sum); + + if (!found_terminator && i==nbSeqs) { + BlockSummary bs; + bs.nbSequences = ERROR(externalSequences_invalid); + return bs; + } + { BlockSummary bs; + bs.nbSequences = i; + bs.blockSize = litSize + totalMatchSize; + bs.litSize = litSize; + return bs; + } +} + +#else + +/* + * The function assumes `litMatchLength` is a packed 64-bit value where the + * lower 32 bits represent the match length. The check varies based on the + * system's endianness: + * - On little-endian systems, it verifies if the entire 64-bit value is at most + * 0xFFFFFFFF, indicating the match length (lower 32 bits) is zero. + * - On big-endian systems, it directly checks if the lower 32 bits are zero. + * + * @returns 1 if the match length is zero, 0 otherwise. + */ +FORCE_INLINE_TEMPLATE int matchLengthHalfIsZero(U64 litMatchLength) +{ + if (MEM_isLittleEndian()) { + return litMatchLength <= 0xFFFFFFFFULL; + } else { + return (U32)litMatchLength == 0; + } +} + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) +{ + /* Use multiple accumulators for efficient use of wide out-of-order machines. 
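+     * Four independent sums break the loop-carried dependency on a single
+     * accumulator, so the four unrolled load+add chains below can overlap
+     * in flight.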
*/ + U64 litMatchSize0 = 0; + U64 litMatchSize1 = 0; + U64 litMatchSize2 = 0; + U64 litMatchSize3 = 0; + size_t n = 0; + + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) + 4 == offsetof(ZSTD_Sequence, matchLength)); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) + 4 == offsetof(ZSTD_Sequence, rep)); + assert(seqs); + + if (nbSeqs > 3) { + /* Process the input in 4 independent streams to reach high throughput. */ + do { + /* Load `litLength` and `matchLength` as a packed `U64`. It is safe + * to use 64-bit unsigned arithmetic here because the sum of `litLength` + * and `matchLength` cannot exceed the block size, so the 32-bit + * subparts will never overflow. */ + U64 litMatchLength = MEM_read64(&seqs[n].litLength); + litMatchSize0 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + assert(seqs[n].offset == 0); + goto _out; + } + + litMatchLength = MEM_read64(&seqs[n + 1].litLength); + litMatchSize1 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + n += 1; + assert(seqs[n].offset == 0); + goto _out; + } + + litMatchLength = MEM_read64(&seqs[n + 2].litLength); + litMatchSize2 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + n += 2; + assert(seqs[n].offset == 0); + goto _out; + } + + litMatchLength = MEM_read64(&seqs[n + 3].litLength); + litMatchSize3 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + n += 3; + assert(seqs[n].offset == 0); + goto _out; + } + + n += 4; + } while(n < nbSeqs - 3); + } + + for (; n < nbSeqs; n++) { + U64 litMatchLength = MEM_read64(&seqs[n].litLength); + litMatchSize0 += litMatchLength; + if (matchLengthHalfIsZero(litMatchLength)) { + assert(seqs[n].offset == 0); + goto _out; + } + } + /* At this point n == nbSeqs, so no end terminator. 
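+     * A valid input must end with an explicit block delimiter, i.e. a final
+     * sequence with matchLength == 0 (and offset == 0), so this is reported
+     * as an error.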
*/ + { BlockSummary bs; + bs.nbSequences = ERROR(externalSequences_invalid); + return bs; + } +_out: + litMatchSize0 += litMatchSize1 + litMatchSize2 + litMatchSize3; + { BlockSummary bs; + bs.nbSequences = n + 1; + if (MEM_isLittleEndian()) { + bs.litSize = (U32)litMatchSize0; + bs.blockSize = bs.litSize + (litMatchSize0 >> 32); + } else { + bs.litSize = litMatchSize0 >> 32; + bs.blockSize = bs.litSize + (U32)litMatchSize0; + } + return bs; + } +} +#endif + + +static size_t +ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t nbSequences, + const void* literals, size_t litSize, size_t srcSize) +{ + size_t remaining = srcSize; + size_t cSize = 0; + BYTE* op = (BYTE*)dst; + int const repcodeResolution = (cctx->appliedParams.searchForExternalRepcodes == ZSTD_ps_enable); + assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_ps_auto); + + DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals_internal: nbSeqs=%zu, litSize=%zu", nbSequences, litSize); + RETURN_ERROR_IF(nbSequences == 0, externalSequences_invalid, "Requires at least 1 end-of-block"); + + /* Special case: empty frame */ + if ((nbSequences == 1) && (inSeqs[0].litLength == 0)) { + U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1); + RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "No room for empty frame block header"); + MEM_writeLE24(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + cSize += ZSTD_blockHeaderSize; + } + + while (nbSequences) { + size_t compressedSeqsSize, cBlockSize, conversionStatus; + BlockSummary const block = ZSTD_get1BlockSummary(inSeqs, nbSequences); + U32 const lastBlock = (block.nbSequences == nbSequences); + FORWARD_IF_ERROR(block.nbSequences, "Error while trying to determine nb of sequences for a block"); + assert(block.nbSequences <= nbSequences); + RETURN_ERROR_IF(block.litSize > litSize, externalSequences_invalid, "discrepancy: Sequences require more literals than present in buffer"); + ZSTD_resetSeqStore(&cctx->seqStore); + + conversionStatus = ZSTD_convertBlockSequences(cctx, + inSeqs, block.nbSequences, + repcodeResolution); + FORWARD_IF_ERROR(conversionStatus, "Bad sequence conversion"); + inSeqs += block.nbSequences; + nbSequences -= block.nbSequences; + remaining -= block.blockSize; + + /* Note: when blockSize is very small, other variant send it uncompressed. + * Here, we still send the sequences, because we don't have the original source to send it uncompressed. + * One could imagine in theory reproducing the source from the sequences, + * but that's complex and costly memory intensive, and goes against the objectives of this variant. 
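+     * The same limitation resurfaces below: when entropy compression does not
+     * produce a fitting block, a raw block would normally be emitted, which is
+     * impossible here, so cannotProduce_uncompressedBlock is returned instead.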
*/ + + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); + + compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal( + op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, + literals, block.litSize, + &cctx->seqStore, + &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, + &cctx->appliedParams, + cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */, + cctx->bmi2); + FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); + /* note: the spec forbids for any compressed block to be larger than maximum block size */ + if (compressedSeqsSize > cctx->blockSizeMax) compressedSeqsSize = 0; + DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); + litSize -= block.litSize; + literals = (const char*)literals + block.litSize; + + /* Note: difficult to check source for RLE block when only Literals are provided, + * but it could be considered from analyzing the sequence directly */ + + if (compressedSeqsSize == 0) { + /* Sending uncompressed blocks is out of reach, because the source is not provided. + * In theory, one could use the sequences to regenerate the source, like a decompressor, + * but it's complex, and memory hungry, killing the purpose of this variant. + * Current outcome: generate an error code. + */ + RETURN_ERROR(cannotProduce_uncompressedBlock, "ZSTD_compressSequencesAndLiterals cannot generate an uncompressed block"); + } else { + U32 cBlockHeader; + assert(compressedSeqsSize > 1); /* no RLE */ + /* Error checking and repcodes update */ + ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); + if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + + /* Write block header into beginning of block*/ + cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); + MEM_writeLE24(op, cBlockHeader); + cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; + DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); + } + + cSize += cBlockSize; + op += cBlockSize; + dstCapacity -= cBlockSize; + cctx->isFirstBlock = 0; + DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); + + if (lastBlock) { + assert(nbSequences == 0); + break; + } + } + + RETURN_ERROR_IF(litSize != 0, externalSequences_invalid, "literals must be entirely and exactly consumed"); + RETURN_ERROR_IF(remaining != 0, externalSequences_invalid, "Sequences must represent a total of exactly srcSize=%zu", srcSize); + DEBUGLOG(4, "cSize final total: %zu", cSize); + return cSize; +} + +size_t +ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* literals, size_t litSize, size_t litCapacity, + size_t decompressedSize) +{ + BYTE* op = (BYTE*)dst; + size_t cSize = 0; + + /* Transparent initialization stage, same as compressStream2() */ + DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals (dstCapacity=%zu)", dstCapacity); + assert(cctx != NULL); + if (litCapacity < litSize) { + RETURN_ERROR(workSpace_tooSmall, "literals buffer is not large enough: must be at least 8 bytes larger than litSize (risk of read out-of-bound)"); + } + FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, decompressedSize), "CCtx initialization failed"); + 
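+    /* Reject parameter combinations that this entry point cannot honor:
+     * it requires explicit block delimiters, and can neither validate
+     * sequences nor compute a frame checksum, since the original source is
+     * never provided. */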
+ if (cctx->appliedParams.blockDelimiters == ZSTD_sf_noBlockDelimiters) { + RETURN_ERROR(frameParameter_unsupported, "This mode is only compatible with explicit delimiters"); + } + if (cctx->appliedParams.validateSequences) { + RETURN_ERROR(parameter_unsupported, "This mode is not compatible with Sequence validation"); + } + if (cctx->appliedParams.fParams.checksumFlag) { + RETURN_ERROR(frameParameter_unsupported, "this mode is not compatible with frame checksum"); + } + + /* Begin writing output, starting with frame header */ + { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, + &cctx->appliedParams, decompressedSize, cctx->dictID); + op += frameHeaderSize; + assert(frameHeaderSize <= dstCapacity); + dstCapacity -= frameHeaderSize; + cSize += frameHeaderSize; + } + + /* Now generate compressed blocks */ + { size_t const cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx, + op, dstCapacity, + inSeqs, inSeqsSize, + literals, litSize, decompressedSize); + FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); + cSize += cBlocksSize; + assert(cBlocksSize <= dstCapacity); + dstCapacity -= cBlocksSize; + } + + DEBUGLOG(4, "Final compressed size: %zu", cSize); + return cSize; +} + /*====== Finalize ======*/ static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs) @@ -6360,7 +8178,6 @@ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); } - size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); @@ -6441,7 +8258,7 @@ static void ZSTD_dedicatedDictSearch_revertCParams( } } -static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) +static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) { switch (mode) { case ZSTD_cpm_unknown: @@ -6465,8 +8282,8 @@ static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMo * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. * Use dictSize == 0 for unknown or unused. - * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */ -static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) + * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */ +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) { U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); @@ -6487,7 +8304,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, cp.targetLength = (unsigned)(-clampedCompressionLevel); } /* refine parameters based on srcSize & dictSize */ - return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode); + return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto); } } @@ -6504,7 +8321,9 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). 
* Fields of `ZSTD_frameParameters` are set to default values */ -static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) { +static ZSTD_parameters +ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) +{ ZSTD_parameters params; ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode); DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); @@ -6518,7 +8337,34 @@ static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned lo * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). * Fields of `ZSTD_frameParameters` are set to default values */ -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) +{ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); } + +void ZSTD_registerSequenceProducer( + ZSTD_CCtx* zc, + void* extSeqProdState, + ZSTD_sequenceProducer_F extSeqProdFunc) +{ + assert(zc != NULL); + ZSTD_CCtxParams_registerSequenceProducer( + &zc->requestedParams, extSeqProdState, extSeqProdFunc + ); +} + +void ZSTD_CCtxParams_registerSequenceProducer( + ZSTD_CCtx_params* params, + void* extSeqProdState, + ZSTD_sequenceProducer_F extSeqProdFunc) +{ + assert(params != NULL); + if (extSeqProdFunc != NULL) { + params->extSeqProdFunc = extSeqProdFunc; + params->extSeqProdState = extSeqProdState; + } else { + params->extSeqProdFunc = NULL; + params->extSeqProdState = NULL; + } +} diff --git a/lib/compress/zstd_compress_internal.h b/lib/compress/zstd_compress_internal.h index baa726f7dff..13a394b3816 100644 --- a/lib/compress/zstd_compress_internal.h +++ b/lib/compress/zstd_compress_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -24,10 +24,7 @@ # include "zstdmt_compress.h" #endif #include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */ - -#if defined (__cplusplus) -extern "C" { -#endif +#include "zstd_preSplit.h" /* ZSTD_SLIPBLOCK_WORKSPACESIZE */ /*-************************************* * Constants @@ -39,7 +36,7 @@ extern "C" { It's not a big deal though : candidate will just be sorted again. Additionally, candidate position 1 will be lost. But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss. - The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy. + The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy. 
This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */ @@ -82,6 +79,70 @@ typedef struct { ZSTD_fseCTables_t fse; } ZSTD_entropyCTables_t; +/*********************************************** +* Sequences * +***********************************************/ +typedef struct SeqDef_s { + U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ + U16 litLength; + U16 mlBase; /* mlBase == matchLength - MINMATCH */ +} SeqDef; + +/* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. */ +typedef enum { + ZSTD_llt_none = 0, /* no longLengthType */ + ZSTD_llt_literalLength = 1, /* represents a long literal */ + ZSTD_llt_matchLength = 2 /* represents a long match */ +} ZSTD_longLengthType_e; + +typedef struct { + SeqDef* sequencesStart; + SeqDef* sequences; /* ptr to end of sequences */ + BYTE* litStart; + BYTE* lit; /* ptr to end of literals */ + BYTE* llCode; + BYTE* mlCode; + BYTE* ofCode; + size_t maxNbSeq; + size_t maxNbLit; + + /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength + * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment + * the existing value of the litLength or matchLength by 0x10000. + */ + ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; /* Index of the sequence to apply long length modification to */ +} SeqStore_t; + +typedef struct { + U32 litLength; + U32 matchLength; +} ZSTD_SequenceLength; + +/** + * Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences + * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. + */ +MEM_STATIC ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t const* seqStore, SeqDef const* seq) +{ + ZSTD_SequenceLength seqLen; + seqLen.litLength = seq->litLength; + seqLen.matchLength = seq->mlBase + MINMATCH; + if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { + if (seqStore->longLengthType == ZSTD_llt_literalLength) { + seqLen.litLength += 0x10000; + } + if (seqStore->longLengthType == ZSTD_llt_matchLength) { + seqLen.matchLength += 0x10000; + } + } + return seqLen; +} + +const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ +int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ + + /*********************************************** * Entropy buffer statistics structs and funcs * ***********************************************/ @@ -91,7 +152,7 @@ typedef struct { * hufDesSize refers to the size of huffman tree description in bytes. * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */ typedef struct { - symbolEncodingType_e hType; + SymbolEncodingType_e hType; BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE]; size_t hufDesSize; } ZSTD_hufCTablesMetadata_t; @@ -102,9 +163,9 @@ typedef struct { * fseTablesSize refers to the size of fse tables in bytes. * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */ typedef struct { - symbolEncodingType_e llType; - symbolEncodingType_e ofType; - symbolEncodingType_e mlType; + SymbolEncodingType_e llType; + SymbolEncodingType_e ofType; + SymbolEncodingType_e mlType; BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE]; size_t fseTablesSize; size_t lastCountSize; /* This is to account for bug in 1.3.4. 
More detail in ZSTD_entropyCompressSeqStore_internal() */ @@ -118,12 +179,13 @@ typedef struct { /** ZSTD_buildBlockEntropyStats() : * Builds entropy for the block. * @return : 0 on success or error code */ -size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize); +size_t ZSTD_buildBlockEntropyStats( + const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize); /********************************* * Compression internals structs * @@ -147,28 +209,29 @@ typedef struct { stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ size_t size; /* The number of sequences. <= capacity. */ size_t capacity; /* The capacity starting from `seq` pointer */ -} rawSeqStore_t; +} RawSeqStore_t; -UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; +UNUSED_ATTR static const RawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; typedef struct { - int price; - U32 off; - U32 mlen; - U32 litlen; - U32 rep[ZSTD_REP_NUM]; + int price; /* price from beginning of segment to this position */ + U32 off; /* offset of previous match */ + U32 mlen; /* length of previous match */ + U32 litlen; /* nb of literals since previous match */ + U32 rep[ZSTD_REP_NUM]; /* offset history after previous match */ } ZSTD_optimal_t; typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e; +#define ZSTD_OPT_SIZE (ZSTD_OPT_NUM+3) typedef struct { /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */ unsigned* litFreq; /* table of literals statistics, of size 256 */ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */ - ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */ - ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */ + ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_SIZE */ + ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */ U32 litSum; /* nb of literals */ U32 litLengthSum; /* nb of litLength codes */ @@ -180,7 +243,7 @@ typedef struct { U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ - ZSTD_paramSwitch_e literalCompressionMode; + ZSTD_ParamSwitch_e literalCompressionMode; } optState_t; typedef struct { @@ -202,11 +265,11 @@ typedef struct { #define ZSTD_WINDOW_START_INDEX 2 -typedef struct ZSTD_matchState_t ZSTD_matchState_t; +typedef struct ZSTD_MatchState_t ZSTD_MatchState_t; #define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */ -struct ZSTD_matchState_t { +struct ZSTD_MatchState_t { ZSTD_window_t window; /* State for window round buffer management */ U32 loadedDictEnd; /* index of end of dictionary, within context's referential. * When loadedDictEnd != 0, a dictionary is in use, and still valid. 
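As background for the long-length mechanism documented in SeqStore_t above: at most one sequence per seqStore may exceed the 16-bit litLength/mlBase fields, and ZSTD_getSequenceLength() adds the truncated 0x10000 back. A minimal sketch of that round trip, using only definitions from this header (the local variables are illustrative, not part of the patch):

    /* sketch: a 70000-byte literal run survives the U16 truncation */
    SeqDef seq;
    SeqStore_t ss;
    ss.sequencesStart = &seq;
    ss.longLengthType = ZSTD_llt_literalLength;
    ss.longLengthPos  = 0;                  /* index of `seq` */
    seq.offBase   = OFFSET_TO_OFFBASE(42);  /* plain offset 42 */
    seq.litLength = (U16)70000;             /* wraps to 70000 - 0x10000 = 4464 */
    seq.mlBase    = 5;                      /* i.e. matchLength = 5 + MINMATCH */
    {   ZSTD_SequenceLength const sl = ZSTD_getSequenceLength(&ss, &seq);
        assert(sl.litLength == 70000);      /* 4464 + 0x10000 */
        assert(sl.matchLength == 5 + MINMATCH);
    }

Accepting this rare fixup keeps SeqDef at 8 bytes, so the sequence array stays compact and cache-friendly.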
@@ -219,33 +282,42 @@ struct ZSTD_matchState_t { U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */ U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/ - U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */ + BYTE* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */ U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */ + U64 hashSalt; /* For row-based matchFinder: salts the hash for reuse of tag table */ + U32 hashSaltEntropy; /* For row-based matchFinder: collects entropy for salt generation */ U32* hashTable; U32* hashTable3; U32* chainTable; - U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */ + int forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */ int dedicatedDictSearch; /* Indicates whether this matchState is using the * dedicated dictionary search structure. */ optState_t opt; /* optimal parser state */ - const ZSTD_matchState_t* dictMatchState; + const ZSTD_MatchState_t* dictMatchState; ZSTD_compressionParameters cParams; - const rawSeqStore_t* ldmSeqStore; + const RawSeqStore_t* ldmSeqStore; /* Controls prefetching in some dictMatchState matchfinders. * This behavior is controlled from the cctx ms. * This parameter has no effect in the cdict ms. */ int prefetchCDictTables; + + /* When == 0, lazy match finders insert every position. + * When != 0, lazy match finders only insert positions they search. + * This allows them to skip much faster over incompressible data, + * at a small cost to compression ratio. + */ + int lazySkipping; }; typedef struct { ZSTD_compressedBlockState_t* prevCBlock; ZSTD_compressedBlockState_t* nextCBlock; - ZSTD_matchState_t matchState; + ZSTD_MatchState_t matchState; } ZSTD_blockState_t; typedef struct { @@ -272,7 +344,7 @@ typedef struct { } ldmState_t; typedef struct { - ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */ + ZSTD_ParamSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */ U32 hashLog; /* Log size of hashTable */ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ U32 minMatchLength; /* Minimum match length */ @@ -303,7 +375,7 @@ struct ZSTD_CCtx_params_s { * There is no guarantee that hint is close to actual source size */ ZSTD_dictAttachPref_e attachDictPref; - ZSTD_paramSwitch_e literalCompressionMode; + ZSTD_ParamSwitch_e literalCompressionMode; /* Multithreading: used to pass parameters to mtctx */ int nbWorkers; @@ -322,14 +394,27 @@ struct ZSTD_CCtx_params_s { ZSTD_bufferMode_e outBufferMode; /* Sequence compression API */ - ZSTD_sequenceFormat_e blockDelimiters; + ZSTD_SequenceFormat_e blockDelimiters; int validateSequences; - /* Block splitting */ - ZSTD_paramSwitch_e useBlockSplitter; + /* Block splitting + * @postBlockSplitter executes split analysis after sequences are produced, + * it's more accurate but consumes more resources. + * @preBlockSplitter_level splits before knowing sequences, + * it's more approximative but also cheaper. + * Valid @preBlockSplitter_level values range from 0 to 6 (included). + * 0 means auto, 1 means do not split, + * then levels are sorted in increasing cpu budget, from 2 (fastest) to 6 (slowest). + * Highest @preBlockSplitter_level combines well with @postBlockSplitter. 
+ */ + ZSTD_ParamSwitch_e postBlockSplitter; + int preBlockSplitter_level; + + /* Adjust the max block size*/ + size_t maxBlockSize; /* Param for deciding whether to use row-based matchfinder */ - ZSTD_paramSwitch_e useRowMatchFinder; + ZSTD_ParamSwitch_e useRowMatchFinder; /* Always load a dictionary in ext-dict mode (not prefix mode)? */ int deterministicRefPrefix; @@ -338,11 +423,25 @@ struct ZSTD_CCtx_params_s { ZSTD_customMem customMem; /* Controls prefetching in some dictMatchState matchfinders */ - ZSTD_paramSwitch_e prefetchCDictTables; + ZSTD_ParamSwitch_e prefetchCDictTables; + + /* Controls whether zstd will fall back to an internal matchfinder + * if the external matchfinder returns an error code. */ + int enableMatchFinderFallback; + + /* Parameters for the external sequence producer API. + * Users set these parameters through ZSTD_registerSequenceProducer(). + * It is not possible to set these parameters individually through the public API. */ + void* extSeqProdState; + ZSTD_sequenceProducer_F extSeqProdFunc; + + /* Controls repcode search in external sequence parsing */ + ZSTD_ParamSwitch_e searchForExternalRepcodes; }; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */ #define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2)) #define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE) +#define TMP_WORKSPACE_SIZE (MAX(ENTROPY_WORKSPACE_SIZE, ZSTD_SLIPBLOCK_WORKSPACESIZE)) /** * Indicates whether this compression proceeds directly from user-provided @@ -360,11 +459,11 @@ typedef enum { */ #define ZSTD_MAX_NB_BLOCK_SPLITS 196 typedef struct { - seqStore_t fullSeqStoreChunk; - seqStore_t firstHalfSeqStore; - seqStore_t secondHalfSeqStore; - seqStore_t currSeqStore; - seqStore_t nextSeqStore; + SeqStore_t fullSeqStoreChunk; + SeqStore_t firstHalfSeqStore; + SeqStore_t secondHalfSeqStore; + SeqStore_t currSeqStore; + SeqStore_t nextSeqStore; U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS]; ZSTD_entropyCTablesMetadata_t entropyMetadata; @@ -381,7 +480,7 @@ struct ZSTD_CCtx_s { size_t dictContentSize; ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */ - size_t blockSize; + size_t blockSizeMax; unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */ unsigned long long consumedSrcSize; unsigned long long producedCSize; @@ -393,13 +492,14 @@ struct ZSTD_CCtx_s { int isFirstBlock; int initialized; - seqStore_t seqStore; /* sequences storage ptrs */ + SeqStore_t seqStore; /* sequences storage ptrs */ ldmState_t ldmState; /* long distance matching state */ rawSeq* ldmSequences; /* Storage for the ldm output sequences */ size_t maxNbLdmSequences; - rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */ + RawSeqStore_t externSeqStore; /* Mutable reference to external sequences */ ZSTD_blockState_t blockState; - U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */ + void* tmpWorkspace; /* used as substitute of stack space - must be aligned for S64 type */ + size_t tmpWkspSize; /* Whether we are streaming or not */ ZSTD_buffered_policy_e bufferedPolicy; @@ -439,6 +539,10 @@ struct ZSTD_CCtx_s { /* Workspace for block splitter */ ZSTD_blockSplitCtx blockSplitCtx; + + /* Buffer for output from external sequence producer */ + ZSTD_Sequence* extSeqBuf; + size_t extSeqBufCapacity; }; typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e; @@ -464,17 +568,17 @@ typedef enum { * In this mode we take both the source size and the dictionary size * into 
account when selecting and adjusting the parameters. */ - ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams. + ZSTD_cpm_unknown = 3 /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams. * We don't know what these parameters are for. We default to the legacy * behavior of taking both the source size and the dict size into account * when selecting and adjusting parameters. */ -} ZSTD_cParamMode_e; +} ZSTD_CParamMode_e; -typedef size_t (*ZSTD_blockCompressor) ( - ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +typedef size_t (*ZSTD_BlockCompressor_f) ( + ZSTD_MatchState_t* bs, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode); +ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode); MEM_STATIC U32 ZSTD_LLcode(U32 litLength) @@ -520,6 +624,25 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) return 1; } +/* ZSTD_selectAddr: + * @return index >= lowLimit ? candidate : backup, + * tries to force branchless codegen. */ +MEM_STATIC const BYTE* +ZSTD_selectAddr(U32 index, U32 lowLimit, const BYTE* candidate, const BYTE* backup) +{ +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ( + "cmp %1, %2\n" + "cmova %3, %0\n" + : "+r"(candidate) + : "r"(index), "r"(lowLimit), "r"(backup) + ); + return candidate; +#else + return index >= lowLimit ? candidate : backup; +#endif +} + /* ZSTD_noCompressBlock() : * Writes uncompressed block to dst buffer from given src. * Returns the size of the block */ @@ -555,7 +678,7 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) { U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6; ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); - assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); + assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); return (srcSize >> minlog) + 2; } @@ -584,7 +707,7 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con { assert(iend > ilimit_w); if (ip <= ilimit_w) { - ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap); + ZSTD_wildcopy(op, ip, (size_t)(ilimit_w - ip), ZSTD_no_overlap); op += ilimit_w - ip; ip = ilimit_w; } @@ -602,14 +725,55 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con #define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM) #define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */ +/*! ZSTD_storeSeqOnly() : + * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. + * Literals themselves are not copied, but @litPtr is updated. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). 
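+ *             e.g. OFFSET_TO_OFFBASE(dist) for a fresh match at distance @dist,
+ *             or REPCODE_TO_OFFBASE(1) to reference the most recent repcode.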
+ * @matchLength : must be >= MINMATCH +*/ +HINT_INLINE UNUSED_ATTR void +ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr, + size_t litLength, + U32 offBase, + size_t matchLength) +{ + assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); + + /* literal Length */ + assert(litLength <= ZSTD_BLOCKSIZE_MAX); + if (UNLIKELY(litLength>0xFFFF)) { + assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ + seqStorePtr->longLengthType = ZSTD_llt_literalLength; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].litLength = (U16)litLength; + + /* match offset */ + seqStorePtr->sequences[0].offBase = offBase; + + /* match Length */ + assert(matchLength <= ZSTD_BLOCKSIZE_MAX); + assert(matchLength >= MINMATCH); + { size_t const mlBase = matchLength - MINMATCH; + if (UNLIKELY(mlBase>0xFFFF)) { + assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ + seqStorePtr->longLengthType = ZSTD_llt_matchLength; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].mlBase = (U16)mlBase; + } + + seqStorePtr->sequences++; +} + /*! ZSTD_storeSeq() : - * Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t. + * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). * @matchLength : must be >= MINMATCH * Allowed to over-read literals up to litLimit. */ HINT_INLINE UNUSED_ATTR void -ZSTD_storeSeq(seqStore_t* seqStorePtr, +ZSTD_storeSeq(SeqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offBase, size_t matchLength) @@ -636,36 +800,14 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16); ZSTD_copy16(seqStorePtr->lit, literals); if (litLength > 16) { - ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); + ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, litLength-16, ZSTD_no_overlap); } } else { ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); } seqStorePtr->lit += litLength; - /* literal Length */ - if (litLength>0xFFFF) { - assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ - seqStorePtr->longLengthType = ZSTD_llt_literalLength; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - } - seqStorePtr->sequences[0].litLength = (U16)litLength; - - /* match offset */ - seqStorePtr->sequences[0].offBase = offBase; - - /* match Length */ - assert(matchLength >= MINMATCH); - { size_t const mlBase = matchLength - MINMATCH; - if (mlBase>0xFFFF) { - assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ - seqStorePtr->longLengthType = ZSTD_llt_matchLength; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - } - seqStorePtr->sequences[0].mlBase = (U16)mlBase; - } - - seqStorePtr->sequences++; + ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength); } /* ZSTD_updateRep() : @@ -694,12 +836,12 @@ ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) typedef struct repcodes_s { U32 rep[3]; -} repcodes_t; +} Repcodes_t; -MEM_STATIC repcodes_t +MEM_STATIC Repcodes_t ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const 
offBase, U32 const ll0) { - repcodes_t newReps; + Repcodes_t newReps; ZSTD_memcpy(&newReps, rep, sizeof(newReps)); ZSTD_updateRep(newReps.rep, offBase, ll0); return newReps; @@ -742,8 +884,8 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match, size_t const matchLength = ZSTD_count(ip, match, vEnd); if (match + matchLength != mEnd) return matchLength; DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength); - DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match); - DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip); + DEBUGLOG(7, "distance from match beginning to end dictionary = %i", (int)(mEnd - match)); + DEBUGLOG(7, "distance from current pos to end buffer = %i", (int)(iEnd - ip)); DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart); DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd)); return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd); @@ -754,28 +896,35 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match, * Hashes ***************************************/ static const U32 prime3bytes = 506832829U; -static U32 ZSTD_hash3(U32 u, U32 h) { assert(h <= 32); return ((u << (32-24)) * prime3bytes) >> (32-h) ; } -MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ +static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s) >> (32-h) ; } +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */ +MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); } static const U32 prime4bytes = 2654435761U; -static U32 ZSTD_hash4(U32 u, U32 h) { assert(h <= 32); return (u * prime4bytes) >> (32-h) ; } -static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h); } +static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); } +static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); } static const U64 prime5bytes = 889523592379ULL; -static size_t ZSTD_hash5(U64 u, U32 h) { assert(h <= 64); return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } -static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } +static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); } static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h) { assert(h <= 64); return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } -static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } +static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); } 
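/* Illustration (editorial sketch, derived from the definitions above): the salted
 * variants differ from the plain ones only by XORing the salt into the
 * multiplicative product before the final shift. For the 4-byte case:
 *
 *     plain  : (u * prime4bytes) >> (32-h)
 *     salted : ((u * prime4bytes) ^ s) >> (32-h)
 *
 * With s==0 both forms coincide, which is why each plain ZSTD_hashNPtr() is
 * implemented as ZSTD_hashNPtrS(ptr, h, 0). Varying the salt re-scrambles
 * bucket assignment, so entries left over in a reused table no longer look
 * like plausible matches for the current compression.
 */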
static const U64 prime7bytes = 58295818150454627ULL; -static size_t ZSTD_hash7(U64 u, U32 h) { assert(h <= 64); return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } -static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } +static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); } static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h) { assert(h <= 64); return (size_t)(((u) * prime8bytes) >> (64-h)) ; } -static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } +static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); } + MEM_STATIC FORCE_INLINE_ATTR size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) @@ -795,6 +944,24 @@ size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) } } +MEM_STATIC FORCE_INLINE_ATTR +size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) { + /* Although some of these hashes do support hBits up to 64, some do not. + * To be on the safe side, always avoid hBits > 32. */ + assert(hBits <= 32); + + switch(mls) + { + default: + case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt); + case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt); + case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt); + case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt); + case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt); + } +} + + /** ZSTD_ipow() : * Return base^exponent. */ @@ -856,11 +1023,12 @@ MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 /*-************************************* * Round buffer management ***************************************/ -#if (ZSTD_WINDOWLOG_MAX_64 > 31) -# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX" -#endif -/* Max current allowed */ -#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX)) +/* Max @current value allowed: + * In 32-bit mode: we want to avoid crossing the 2 GB limit, + * reducing risks of side effects in case of signed operations on indexes. + * In 64-bit mode: we want to ensure that adding the maximum job size (512 MB) + * doesn't overflow U32 index capacity (4 GB) */ +#define ZSTD_CURRENT_MAX (MEM_64bits() ? 3500U MB : 2000U MB) /* Maximum chunk size before overflow correction needs to be called again */ #define ZSTD_CHUNKSIZE_MAX \ ( ((U32)-1) /* Maximum ending current index */ \ @@ -900,7 +1068,7 @@ MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window) * Inspects the provided matchState and figures out what dictMode should be * passed to the compressor. */ -MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms) +MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_MatchState_t *ms) { return ZSTD_window_hasExtDict(ms->window) ? ZSTD_extDict : @@ -986,7 +1154,9 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, * The least significant cycleLog bits of the indices must remain the same, * which may be 0. 
Every index up to maxDist in the past must be valid. */ -MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, U32 maxDist, void const* src) { /* preemptive overflow correction: @@ -1087,7 +1257,7 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window, const void* blockEnd, U32 maxDist, U32* loadedDictEndPtr, - const ZSTD_matchState_t** dictMatchStatePtr) + const ZSTD_MatchState_t** dictMatchStatePtr) { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0; @@ -1132,7 +1302,7 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window, const void* blockEnd, U32 maxDist, U32* loadedDictEndPtr, - const ZSTD_matchState_t** dictMatchStatePtr) + const ZSTD_MatchState_t** dictMatchStatePtr) { assert(loadedDictEndPtr != NULL); assert(dictMatchStatePtr != NULL); @@ -1142,10 +1312,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window, (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); assert(blockEndIdx >= loadedDictEnd); - if (blockEndIdx > loadedDictEnd + maxDist) { + if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) { /* On reaching window size, dictionaries are invalidated. * For simplification, if window size is reached anywhere within next block, * the dictionary is invalidated for the full block. + * + * We also have to invalidate the dictionary if ZSTD_window_update() has detected + * non-contiguous segments, which means that loadedDictEnd != window->dictLimit. + * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use + * dictMatchState, so setting it to NULL is not a problem. */ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)"); *loadedDictEndPtr = 0; @@ -1174,9 +1349,11 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { * forget about the extDict. Handles overlap of the prefix and extDict. * Returns non-zero if the segment is contiguous. */ -MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, - void const* src, size_t srcSize, - int forceNonContiguous) +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_window_update(ZSTD_window_t* window, + const void* src, size_t srcSize, + int forceNonContiguous) { BYTE const* const ip = (BYTE const*)src; U32 contiguous = 1; @@ -1203,8 +1380,9 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ if ( (ip+srcSize > window->dictBase + window->lowLimit) & (ip < window->dictBase + window->dictLimit)) { - ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase; - U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx; + size_t const highInputIdx = (size_t)((ip + srcSize) - window->dictBase); + U32 const lowLimitMax = (highInputIdx > (size_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx; + assert(highInputIdx < UINT_MAX); window->lowLimit = lowLimitMax; DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit); } @@ -1214,7 +1392,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, /** * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. 
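 * Worked example (illustrative numbers, assuming no dictionary is loaded):
 * with windowLog==10 (maxDistance==1024) and window.lowLimit==100, a position
 * curr==5000 is window-limited, so the lowest usable index is
 * curr - maxDistance == 3976; at curr==800 the whole history down to
 * lowLimit (100) is still reachable.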
*/ -MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) +MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog) { U32 const maxDistance = 1U << windowLog; U32 const lowestValid = ms->window.lowLimit; @@ -1231,7 +1409,7 @@ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, u /** * Returns the lowest allowed match index in the prefix. */ -MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) +MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog) { U32 const maxDistance = 1U << windowLog; U32 const lowestValid = ms->window.dictLimit; @@ -1244,6 +1422,13 @@ MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, return matchLowest; } +/* index_safety_check: + * intentional underflow : ensure repIndex isn't overlapping dict + prefix + * @return 1 if values are not overlapping, + * 0 otherwise */ +MEM_STATIC int ZSTD_index_overlap_check(const U32 prefixLowestIndex, const U32 repIndex) { + return ((U32)((prefixLowestIndex-1) - repIndex) >= 3); +} /* debug functions */ @@ -1314,10 +1499,6 @@ MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) { return tag1 == tag2; } -#if defined (__cplusplus) -} -#endif - /* =============================================================== * Shared internal declarations * These prototypes may be called from sources not in lib/compress @@ -1333,6 +1514,25 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); +typedef struct { + U32 idx; /* Index in array of ZSTD_Sequence */ + U32 posInSequence; /* Position within sequence at idx */ + size_t posInSrc; /* Number of bytes given by sequences provided so far */ +} ZSTD_SequencePosition; + +/* for benchmark */ +size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx, + const ZSTD_Sequence* const inSeqs, size_t nbSequences, + int repcodeResolution); + +typedef struct { + size_t nbSequences; + size_t blockSize; + size_t litSize; +} BlockSummary; + +BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs); + /* ============================================================== * Private declarations * These prototypes shall only be called from within lib/compress @@ -1344,7 +1544,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); * Note: srcSizeHint == 0 means 0! */ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( - const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); + const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); /*! ZSTD_initCStream_internal() : * Private use only. Init streaming operation. @@ -1356,7 +1556,7 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); -void ZSTD_resetSeqStore(seqStore_t* ssPtr); +void ZSTD_resetSeqStore(SeqStore_t* ssPtr); /*! ZSTD_getCParamsFromCDict() : * as the name implies */ @@ -1395,11 +1595,10 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity); * This cannot be used when long range matching is enabled. * Zstd will use these sequences, and pass the literals to a secondary block * compressor. - * @return : An error code on failure. * NOTE: seqs are not verified! 
Invalid sequences can cause out-of-bounds memory * access and data corruption. */ -size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq); +void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq); /** ZSTD_cycleLog() : * condition for correct operation : hashLog > 1 */ @@ -1410,4 +1609,28 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); */ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize); +/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */ +MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) { + return params->extSeqProdFunc != NULL; +} + +/* =============================================================== + * Deprecated definitions that are still used internally to avoid + * deprecation warnings. These functions are exactly equivalent to + * their public variants, but avoid the deprecation warnings. + * =============================================================== */ + +size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); + +size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); + + #endif /* ZSTD_COMPRESS_H */ diff --git a/lib/compress/zstd_compress_literals.c b/lib/compress/zstd_compress_literals.c index 15bde09e625..06036de5da5 100644 --- a/lib/compress/zstd_compress_literals.c +++ b/lib/compress/zstd_compress_literals.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -24,9 +24,9 @@ static size_t showHexa(const void* src, size_t srcSize) const BYTE* const ip = (const BYTE*)src; size_t u; for (u=0; u= 1); + assert(src != NULL); + { const BYTE b = ((const BYTE*)src)[0]; + size_t p; + for (p=1; p31) + (srcSize>4095); - (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ + assert(dstCapacity >= 4); (void)dstCapacity; + assert(allBytesIdentical(src, srcSize)); switch(flSize) { @@ -88,24 +102,45 @@ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* } ostart[flSize] = *(const BYTE*)src; - DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1); + DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1); return flSize+1; } -size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, int disableLiteralCompression, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2, - unsigned suspectUncompressible) +/* ZSTD_minLiteralsToCompress() : + * returns minimal amount of literals + * for literal compression to even be attempted. + * Minimum is made tighter as compression strategy increases. 
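+ * Worked thresholds (derived from the formula below): shift = MIN(9-strategy, 3),
+ * so btultra2 (9) => 8 bytes, btultra (8) => 16, btopt (7) => 32, and every
+ * strategy <= btlazy2 (6) saturates at 64 bytes; when a valid Huffman table
+ * can be reused (HUF_repeat_valid), the threshold drops to 6 bytes regardless
+ * of strategy.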
+ */ +static size_t +ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat) +{ + assert((int)strategy >= 0); + assert((int)strategy <= 9); + /* btultra2 : min 8 bytes; + * then 2x larger for each successive compression strategy + * max threshold 64 bytes */ + { int const shift = MIN(9-(int)strategy, 3); + size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift; + DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc); + return mintc; + } +} + +size_t ZSTD_compressLiterals ( + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + void* entropyWorkspace, size_t entropyWorkspaceSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, + int disableLiteralCompression, + int suspectUncompressible, + int bmi2) { - size_t const minGain = ZSTD_minGain(srcSize, strategy); size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); BYTE* const ostart = (BYTE*)dst; U32 singleStream = srcSize < 256; - symbolEncodingType_e hType = set_compressed; + SymbolEncodingType_e hType = set_compressed; size_t cLitSize; DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)", @@ -119,16 +154,19 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, if (disableLiteralCompression) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - /* small ? don't even attempt compression (speed opt) */ -# define COMPRESS_LITERALS_SIZE_MIN 63 - { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; - if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } + /* if too small, don't even attempt compression (speed opt) */ + if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode)) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); { HUF_repeat repeat = prevHuf->repeatMode; - int const preferRepeat = (strategy < ZSTD_lazy) ? srcSize <= 1024 : 0; - typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int, int, unsigned); + int const flags = 0 + | (bmi2 ? HUF_flags_bmi2 : 0) + | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0) + | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0) + | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0); + + typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int); huf_compress_f huf_compress; if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; huf_compress = singleStream ? 
HUF_compress1X_repeat : HUF_compress4X_repeat; @@ -137,23 +175,30 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, HUF_SYMBOLVALUE_MAX, LitHufLog, entropyWorkspace, entropyWorkspaceSize, (HUF_CElt*)nextHuf->CTable, - &repeat, preferRepeat, - bmi2, suspectUncompressible); + &repeat, flags); + DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize); if (repeat != HUF_repeat_none) { /* reused the existing table */ - DEBUGLOG(5, "Reusing previous huffman table"); + DEBUGLOG(5, "reusing statistics from previous huffman block"); hType = set_repeat; } } - if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) { - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } + { size_t const minGain = ZSTD_minGain(srcSize, strategy); + if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) { + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } } if (cLitSize==1) { - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); - } + /* A return value of 1 signals that the alphabet consists of a single symbol. + * However, in some rare circumstances, it could be the compressed size (a single byte). + * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`. + * (it's also necessary to not generate statistics). + * Therefore, in such a case, actively check that all bytes are identical. */ + if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) { + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } } if (hType == set_compressed) { /* using a newly constructed table */ @@ -164,16 +209,19 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ + if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ + assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ + assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); diff --git a/lib/compress/zstd_compress_literals.h b/lib/compress/zstd_compress_literals.h index 9775fb97cb7..b060c8ad218 100644 --- a/lib/compress/zstd_compress_literals.h +++ b/lib/compress/zstd_compress_literals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -16,16 +16,24 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize); +/* ZSTD_compressRleLiteralsBlock() : + * Conditions : + * - All bytes in @src are identical + * - dstCapacity >= 4 */ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize); -/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ -size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, int disableLiteralCompression, - void* dst, size_t dstCapacity, +/* ZSTD_compressLiterals(): + * @entropyWorkspace: must be aligned on 4-bytes boundaries + * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE + * @suspectUncompressible: sampling checks, to potentially skip huffman coding + */ +size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2, - unsigned suspectUncompressible); + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, int disableLiteralCompression, + int suspectUncompressible, + int bmi2); #endif /* ZSTD_COMPRESS_LITERALS_H */ diff --git a/lib/compress/zstd_compress_sequences.c b/lib/compress/zstd_compress_sequences.c index 2c1eee5676a..7beb9daa603 100644 --- a/lib/compress/zstd_compress_sequences.c +++ b/lib/compress/zstd_compress_sequences.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -153,13 +153,13 @@ size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, return cost >> 8; } -symbolEncodingType_e +SymbolEncodingType_e ZSTD_selectEncodingType( FSE_repeat* repeatMode, unsigned const* count, unsigned const max, size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, FSE_CTable const* prevCTable, short const* defaultNorm, U32 defaultNormLog, - ZSTD_defaultPolicy_e const isDefaultAllowed, + ZSTD_DefaultPolicy_e const isDefaultAllowed, ZSTD_strategy const strategy) { ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); @@ -241,7 +241,7 @@ typedef struct { size_t ZSTD_buildCTable(void* dst, size_t dstCapacity, - FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, + FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type, unsigned* count, U32 max, const BYTE* codeTable, size_t nbSeq, const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, @@ -293,7 +293,7 @@ ZSTD_encodeSequences_body( FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets) + SeqDef const* sequences, size_t nbSeq, int longOffsets) { BIT_CStream_t blockStream; FSE_CState_t stateMatchLength; @@ -387,7 +387,7 @@ ZSTD_encodeSequences_default( FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets) + SeqDef const* sequences, size_t nbSeq, int longOffsets) { return ZSTD_encodeSequences_body(dst, dstCapacity, 
CTable_MatchLength, mlCodeTable, @@ -405,7 +405,7 @@ ZSTD_encodeSequences_bmi2( FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets) + SeqDef const* sequences, size_t nbSeq, int longOffsets) { return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, @@ -421,7 +421,7 @@ size_t ZSTD_encodeSequences( FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) + SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) { DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity); #if DYNAMIC_BMI2 diff --git a/lib/compress/zstd_compress_sequences.h b/lib/compress/zstd_compress_sequences.h index 7991364c2f7..4be8e91f248 100644 --- a/lib/compress/zstd_compress_sequences.h +++ b/lib/compress/zstd_compress_sequences.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,26 +11,27 @@ #ifndef ZSTD_COMPRESS_SEQUENCES_H #define ZSTD_COMPRESS_SEQUENCES_H +#include "zstd_compress_internal.h" /* SeqDef */ #include "../common/fse.h" /* FSE_repeat, FSE_CTable */ -#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */ +#include "../common/zstd_internal.h" /* SymbolEncodingType_e, ZSTD_strategy */ typedef enum { ZSTD_defaultDisallowed = 0, ZSTD_defaultAllowed = 1 -} ZSTD_defaultPolicy_e; +} ZSTD_DefaultPolicy_e; -symbolEncodingType_e +SymbolEncodingType_e ZSTD_selectEncodingType( FSE_repeat* repeatMode, unsigned const* count, unsigned const max, size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, FSE_CTable const* prevCTable, short const* defaultNorm, U32 defaultNormLog, - ZSTD_defaultPolicy_e const isDefaultAllowed, + ZSTD_DefaultPolicy_e const isDefaultAllowed, ZSTD_strategy const strategy); size_t ZSTD_buildCTable(void* dst, size_t dstCapacity, - FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, + FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type, unsigned* count, U32 max, const BYTE* codeTable, size_t nbSeq, const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, @@ -42,7 +43,7 @@ size_t ZSTD_encodeSequences( FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2); + SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2); size_t ZSTD_fseBitCost( FSE_CTable const* ctable, diff --git a/lib/compress/zstd_compress_superblock.c b/lib/compress/zstd_compress_superblock.c index eed58e7cfca..6f57345be62 100644 --- a/lib/compress/zstd_compress_superblock.c +++ b/lib/compress/zstd_compress_superblock.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -51,11 +51,9 @@ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, BYTE* const oend = ostart + dstSize; BYTE* op = ostart + lhSize; U32 const singleStream = lhSize == 3; - symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; + SymbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; size_t cLitSize = 0; - (void)bmi2; /* TODO bmi2... */ - DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy); *entropyWritten = 0; @@ -77,9 +75,9 @@ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize); } - /* TODO bmi2 */ - { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable) - : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable); + { int const flags = bmi2 ? HUF_flags_bmi2 : 0; + const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags) + : HUF_compress4X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags); op += cSize; cLitSize += cSize; if (cSize == 0 || ERR_isError(cSize)) { @@ -104,7 +102,7 @@ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ - { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14); + { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } @@ -124,30 +122,30 @@ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, } *entropyWritten = 1; DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart)); - return op-ostart; + return (size_t)(op-ostart); } static size_t -ZSTD_seqDecompressedSize(seqStore_t const* seqStore, - const seqDef* sequences, size_t nbSeq, - size_t litSize, int lastSequence) +ZSTD_seqDecompressedSize(SeqStore_t const* seqStore, + const SeqDef* sequences, size_t nbSeqs, + size_t litSize, int lastSubBlock) { - const seqDef* const sstart = sequences; - const seqDef* const send = sequences + nbSeq; - const seqDef* sp = sstart; size_t matchLengthSum = 0; size_t litLengthSum = 0; - (void)(litLengthSum); /* suppress unused variable warning on some environments */ - while (send-sp > 0) { - ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); + size_t n; + for (n=0; n>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; if (nbSeq==0) { - return op - ostart; + return (size_t)(op - ostart); } /* seqHead : flags for FSE encoding type */ @@ -211,7 +209,7 @@ ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, } { size_t const bitstreamSize = ZSTD_encodeSequences( - op, oend - op, + op, (size_t)(oend - op), fseTables->matchlengthCTable, mlCode, fseTables->offcodeCTable, ofCode, fseTables->litlengthCTable, llCode, @@ -255,7 +253,7 @@ ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, #endif *entropyWritten = 1; - return op - ostart; + return (size_t)(op - ostart); } /** ZSTD_compressSubBlock() : @@ -264,7 +262,7 @@ ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, * Or 0 if it failed to compress. 
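 * Caller pattern (sketch, mirroring ZSTD_compressSubBlock_multi() below):
 *
 *     size_t const cSize = ZSTD_compressSubBlock(...);
 *     FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
 *     if (cSize == 0 || cSize >= decompressedSize) {
 *         // not worth committing: coalesce into a later or uncompressed block
 *     }
 *
 * i.e. 0 is a valid "did not compress" outcome, distinct from an error code.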
*/ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, - const seqDef* sequences, size_t nbSeq, + const SeqDef* sequences, size_t nbSeq, const BYTE* literals, size_t litSize, const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, const ZSTD_CCtx_params* cctxParams, @@ -281,7 +279,8 @@ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock); { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable, &entropyMetadata->hufMetadata, literals, litSize, - op, oend-op, bmi2, writeLitEntropy, litEntropyWritten); + op, (size_t)(oend-op), + bmi2, writeLitEntropy, litEntropyWritten); FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed"); if (cLitSize == 0) return 0; op += cLitSize; @@ -291,18 +290,18 @@ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, sequences, nbSeq, llCode, mlCode, ofCode, cctxParams, - op, oend-op, + op, (size_t)(oend-op), bmi2, writeSeqEntropy, seqEntropyWritten); FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed"); if (cSeqSize == 0) return 0; op += cSeqSize; } /* Write block header */ - { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize; + { size_t cSize = (size_t)(op-ostart) - ZSTD_blockHeaderSize; U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); MEM_writeLE24(ostart, cBlockHeader24); } - return op-ostart; + return (size_t)(op-ostart); } static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize, @@ -328,7 +327,7 @@ static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t lit return 0; } -static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type, +static size_t ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type, const BYTE* codeTable, unsigned maxCode, size_t nbSeq, const FSE_CTable* fseCTable, const U8* additionalBits, @@ -391,7 +390,11 @@ static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable, return cSeqSizeEstimate + sequencesSectionHeaderSize; } -static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize, +typedef struct { + size_t estLitSize; + size_t estBlockSize; +} EstimatedBlockSize; +static EstimatedBlockSize ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize, const BYTE* ofCodeTable, const BYTE* llCodeTable, const BYTE* mlCodeTable, @@ -399,15 +402,17 @@ static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize, const ZSTD_entropyCTables_t* entropy, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, size_t wkspSize, - int writeLitEntropy, int writeSeqEntropy) { - size_t cSizeEstimate = 0; - cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize, - &entropy->huf, &entropyMetadata->hufMetadata, - workspace, wkspSize, writeLitEntropy); - cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, + int writeLitEntropy, int writeSeqEntropy) +{ + EstimatedBlockSize ebs; + ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize, + &entropy->huf, &entropyMetadata->hufMetadata, + workspace, wkspSize, writeLitEntropy); + ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, workspace, wkspSize, writeSeqEntropy); - return cSizeEstimate + ZSTD_blockHeaderSize; + ebs.estBlockSize += 
ebs.estLitSize + ZSTD_blockHeaderSize; + return ebs; } static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata) @@ -421,14 +426,57 @@ static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMe return 0; } +static size_t countLiterals(SeqStore_t const* seqStore, const SeqDef* sp, size_t seqCount) +{ + size_t n, total = 0; + assert(sp != NULL); + for (n=0; n %zu bytes", seqCount, (const void*)sp, total); + return total; +} + +#define BYTESCALE 256 + +static size_t sizeBlockSequences(const SeqDef* sp, size_t nbSeqs, + size_t targetBudget, size_t avgLitCost, size_t avgSeqCost, + int firstSubBlock) +{ + size_t n, budget = 0, inSize=0; + /* entropy headers */ + size_t const headerSize = (size_t)firstSubBlock * 120 * BYTESCALE; /* generous estimate */ + assert(firstSubBlock==0 || firstSubBlock==1); + budget += headerSize; + + /* first sequence => at least one sequence*/ + budget += sp[0].litLength * avgLitCost + avgSeqCost; + if (budget > targetBudget) return 1; + inSize = sp[0].litLength + (sp[0].mlBase+MINMATCH); + + /* loop over sequences */ + for (n=1; n targetBudget) + /* though continue to expand until the sub-block is deemed compressible */ + && (budget < inSize * BYTESCALE) ) + break; + } + + return n; +} + /** ZSTD_compressSubBlock_multi() : * Breaks super-block into multiple sub-blocks and compresses them. - * Entropy will be written to the first block. - * The following blocks will use repeat mode to compress. - * All sub-blocks are compressed blocks (no raw or rle blocks). - * @return : compressed size of the super block (which is multiple ZSTD blocks) - * Or 0 if it failed to compress. */ -static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, + * Entropy will be written into the first block. + * The following blocks use repeat_mode to compress. + * Sub-blocks are all compressed, except the last one when beneficial. + * @return : compressed size of the super block (which features multiple ZSTD blocks) + * or 0 if it failed to compress. 
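+ * Budget-model illustration (hypothetical round numbers): costs are tracked
+ * in fixed-point units of 1/BYTESCALE (1/256th) of a byte. If the whole
+ * block is estimated at 10000 bytes and targetCBlockSize is 4000, then
+ * nbSubBlocks = MAX((10000 + 2000) / 4000, 1) == 3, and each sub-block gets
+ * avgBlockBudget == (10000 * BYTESCALE) / 3, i.e. ~3333 bytes, which
+ * sizeBlockSequences() then translates into a number of sequences.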
*/ +static size_t ZSTD_compressSubBlock_multi(const SeqStore_t* seqStorePtr, const ZSTD_compressedBlockState_t* prevCBlock, ZSTD_compressedBlockState_t* nextCBlock, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, @@ -438,12 +486,14 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, const int bmi2, U32 lastBlock, void* workspace, size_t wkspSize) { - const seqDef* const sstart = seqStorePtr->sequencesStart; - const seqDef* const send = seqStorePtr->sequences; - const seqDef* sp = sstart; + const SeqDef* const sstart = seqStorePtr->sequencesStart; + const SeqDef* const send = seqStorePtr->sequences; + const SeqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */ + size_t const nbSeqs = (size_t)(send - sstart); const BYTE* const lstart = seqStorePtr->litStart; const BYTE* const lend = seqStorePtr->lit; const BYTE* lp = lstart; + size_t const nbLiterals = (size_t)(lend - lstart); BYTE const* ip = (BYTE const*)src; BYTE const* const iend = ip + srcSize; BYTE* const ostart = (BYTE*)dst; @@ -452,97 +502,153 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, const BYTE* llCodePtr = seqStorePtr->llCode; const BYTE* mlCodePtr = seqStorePtr->mlCode; const BYTE* ofCodePtr = seqStorePtr->ofCode; - size_t targetCBlockSize = cctxParams->targetCBlockSize; - size_t litSize, seqCount; - int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed; + size_t const minTarget = ZSTD_TARGETCBLOCKSIZE_MIN; /* enforce minimum size, to reduce undesirable side effects */ + size_t const targetCBlockSize = MAX(minTarget, cctxParams->targetCBlockSize); + int writeLitEntropy = (entropyMetadata->hufMetadata.hType == set_compressed); int writeSeqEntropy = 1; - int lastSequence = 0; - - DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)", - (unsigned)(lend-lp), (unsigned)(send-sstart)); - - litSize = 0; - seqCount = 0; - do { - size_t cBlockSizeEstimate = 0; - if (sstart == send) { - lastSequence = 1; - } else { - const seqDef* const sequence = sp + seqCount; - lastSequence = sequence == send - 1; - litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength; - seqCount++; - } - if (lastSequence) { - assert(lp <= lend); - assert(litSize <= (size_t)(lend - lp)); - litSize = (size_t)(lend - lp); + + DEBUGLOG(5, "ZSTD_compressSubBlock_multi (srcSize=%u, litSize=%u, nbSeq=%u)", + (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart)); + + /* let's start by a general estimation for the full block */ + if (nbSeqs > 0) { + EstimatedBlockSize const ebs = + ZSTD_estimateSubBlockSize(lp, nbLiterals, + ofCodePtr, llCodePtr, mlCodePtr, nbSeqs, + &nextCBlock->entropy, entropyMetadata, + workspace, wkspSize, + writeLitEntropy, writeSeqEntropy); + /* quick estimation */ + size_t const avgLitCost = nbLiterals ? 
(ebs.estLitSize * BYTESCALE) / nbLiterals : BYTESCALE; + size_t const avgSeqCost = ((ebs.estBlockSize - ebs.estLitSize) * BYTESCALE) / nbSeqs; + const size_t nbSubBlocks = MAX((ebs.estBlockSize + (targetCBlockSize/2)) / targetCBlockSize, 1); + size_t n, avgBlockBudget, blockBudgetSupp=0; + avgBlockBudget = (ebs.estBlockSize * BYTESCALE) / nbSubBlocks; + DEBUGLOG(5, "estimated fullblock size=%u bytes ; avgLitCost=%.2f ; avgSeqCost=%.2f ; targetCBlockSize=%u, nbSubBlocks=%u ; avgBlockBudget=%.0f bytes", + (unsigned)ebs.estBlockSize, (double)avgLitCost/BYTESCALE, (double)avgSeqCost/BYTESCALE, + (unsigned)targetCBlockSize, (unsigned)nbSubBlocks, (double)avgBlockBudget/BYTESCALE); + /* simplification: if estimates states that the full superblock doesn't compress, just bail out immediately + * this will result in the production of a single uncompressed block covering @srcSize.*/ + if (ebs.estBlockSize > srcSize) return 0; + + /* compress and write sub-blocks */ + assert(nbSubBlocks>0); + for (n=0; n < nbSubBlocks-1; n++) { + /* determine nb of sequences for current sub-block + nbLiterals from next sequence */ + size_t const seqCount = sizeBlockSequences(sp, (size_t)(send-sp), + avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n==0); + /* if reached last sequence : break to last sub-block (simplification) */ + assert(seqCount <= (size_t)(send-sp)); + if (sp + seqCount == send) break; + assert(seqCount > 0); + /* compress sub-block */ + { int litEntropyWritten = 0; + int seqEntropyWritten = 0; + size_t litSize = countLiterals(seqStorePtr, sp, seqCount); + const size_t decompressedSize = + ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0); + size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, + sp, seqCount, + lp, litSize, + llCodePtr, mlCodePtr, ofCodePtr, + cctxParams, + op, (size_t)(oend-op), + bmi2, writeLitEntropy, writeSeqEntropy, + &litEntropyWritten, &seqEntropyWritten, + 0); + FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); + + /* check compressibility, update state components */ + if (cSize > 0 && cSize < decompressedSize) { + DEBUGLOG(5, "Committed sub-block compressing %u bytes => %u bytes", + (unsigned)decompressedSize, (unsigned)cSize); + assert(ip + decompressedSize <= iend); + ip += decompressedSize; + lp += litSize; + op += cSize; + llCodePtr += seqCount; + mlCodePtr += seqCount; + ofCodePtr += seqCount; + /* Entropy only needs to be written once */ + if (litEntropyWritten) { + writeLitEntropy = 0; + } + if (seqEntropyWritten) { + writeSeqEntropy = 0; + } + sp += seqCount; + blockBudgetSupp = 0; + } } + /* otherwise : do not compress yet, coalesce current sub-block with following one */ } - /* I think there is an optimization opportunity here. - * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful - * since it recalculates estimate from scratch. - * For example, it would recount literal distribution and symbol codes every time. 
- */ - cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount, - &nextCBlock->entropy, entropyMetadata, - workspace, wkspSize, writeLitEntropy, writeSeqEntropy); - if (cBlockSizeEstimate > targetCBlockSize || lastSequence) { - int litEntropyWritten = 0; - int seqEntropyWritten = 0; - const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence); - const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, - sp, seqCount, - lp, litSize, - llCodePtr, mlCodePtr, ofCodePtr, - cctxParams, - op, oend-op, - bmi2, writeLitEntropy, writeSeqEntropy, - &litEntropyWritten, &seqEntropyWritten, - lastBlock && lastSequence); - FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); - if (cSize > 0 && cSize < decompressedSize) { - DEBUGLOG(5, "Committed the sub-block"); - assert(ip + decompressedSize <= iend); - ip += decompressedSize; - sp += seqCount; - lp += litSize; - op += cSize; - llCodePtr += seqCount; - mlCodePtr += seqCount; - ofCodePtr += seqCount; - litSize = 0; - seqCount = 0; - /* Entropy only needs to be written once */ - if (litEntropyWritten) { - writeLitEntropy = 0; - } - if (seqEntropyWritten) { - writeSeqEntropy = 0; - } + } /* if (nbSeqs > 0) */ + + /* write last block */ + DEBUGLOG(5, "Generate last sub-block: %u sequences remaining", (unsigned)(send - sp)); + { int litEntropyWritten = 0; + int seqEntropyWritten = 0; + size_t litSize = (size_t)(lend - lp); + size_t seqCount = (size_t)(send - sp); + const size_t decompressedSize = + ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1); + size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, + sp, seqCount, + lp, litSize, + llCodePtr, mlCodePtr, ofCodePtr, + cctxParams, + op, (size_t)(oend-op), + bmi2, writeLitEntropy, writeSeqEntropy, + &litEntropyWritten, &seqEntropyWritten, + lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); + + /* update pointers, the nb of literals borrowed from next sequence must be preserved */ + if (cSize > 0 && cSize < decompressedSize) { + DEBUGLOG(5, "Last sub-block compressed %u bytes => %u bytes", + (unsigned)decompressedSize, (unsigned)cSize); + assert(ip + decompressedSize <= iend); + ip += decompressedSize; + lp += litSize; + op += cSize; + llCodePtr += seqCount; + mlCodePtr += seqCount; + ofCodePtr += seqCount; + /* Entropy only needs to be written once */ + if (litEntropyWritten) { + writeLitEntropy = 0; + } + if (seqEntropyWritten) { + writeSeqEntropy = 0; } + sp += seqCount; } - } while (!lastSequence); + } + + if (writeLitEntropy) { - DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten"); + DEBUGLOG(5, "Literal entropy tables were never written"); ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf)); } if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) { /* If we haven't written our entropy tables, then we've violated our contract and * must emit an uncompressed block. 
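 * (Sub-blocks emitted so far used repeat mode, which presumes those tables
 * exist on the decoder side; returning 0 here makes the caller fall back to
 * emitting the whole superblock as a single uncompressed block.)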
*/ - DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten"); + DEBUGLOG(5, "Sequence entropy tables were never written => cancel, emit an uncompressed block"); return 0; } + if (ip < iend) { - size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock); - DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip)); + /* some data left : last part of the block sent uncompressed */ + size_t const rSize = (size_t)((iend - ip)); + size_t const cSize = ZSTD_noCompressBlock(op, (size_t)(oend - op), ip, rSize, lastBlock); + DEBUGLOG(5, "Generate last uncompressed sub-block of %u bytes", (unsigned)(rSize)); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); assert(cSize != 0); op += cSize; /* We have to regenerate the repcodes because we've skipped some sequences */ if (sp < send) { - seqDef const* seq; - repcodes_t rep; + const SeqDef* seq; + Repcodes_t rep; ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); for (seq = sstart; seq < sp; ++seq) { ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); @@ -550,14 +656,17 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep)); } } - DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed"); - return op-ostart; + + DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed all subBlocks: total compressed size = %u", + (unsigned)(op-ostart)); + return (size_t)(op-ostart); } size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, - void const* src, size_t srcSize, - unsigned lastBlock) { + const void* src, size_t srcSize, + unsigned lastBlock) +{ ZSTD_entropyCTablesMetadata_t entropyMetadata; FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore, @@ -565,7 +674,7 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, &entropyMetadata, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); + zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */), ""); return ZSTD_compressSubBlock_multi(&zc->seqStore, zc->blockState.prevCBlock, @@ -575,5 +684,5 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, dst, dstCapacity, src, srcSize, zc->bmi2, lastBlock, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */); + zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */); } diff --git a/lib/compress/zstd_compress_superblock.h b/lib/compress/zstd_compress_superblock.h index 176f9b106f3..8e494f0d5e6 100644 --- a/lib/compress/zstd_compress_superblock.h +++ b/lib/compress/zstd_compress_superblock.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/zstd_cwksp.h b/lib/compress/zstd_cwksp.h index 47afe3dc7b0..77518002d00 100644 --- a/lib/compress/zstd_cwksp.h +++ b/lib/compress/zstd_cwksp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -14,11 +14,10 @@ /*-************************************* * Dependencies ***************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ #include "../common/zstd_internal.h" - -#if defined (__cplusplus) -extern "C" { -#endif +#include "../common/portability_macros.h" +#include "../common/compiler.h" /* ZS2_isPower2 */ /*-************************************* * Constants @@ -44,8 +43,9 @@ extern "C" { ***************************************/ typedef enum { ZSTD_cwksp_alloc_objects, - ZSTD_cwksp_alloc_buffers, - ZSTD_cwksp_alloc_aligned + ZSTD_cwksp_alloc_aligned_init_once, + ZSTD_cwksp_alloc_aligned, + ZSTD_cwksp_alloc_buffers } ZSTD_cwksp_alloc_phase_e; /** @@ -98,8 +98,8 @@ typedef enum { * * Workspace Layout: * - * [ ... workspace ... ] - * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers] + * [ ... workspace ... ] + * [objects][tables ->] free space [<- buffers][<- aligned][<- init once] * * The various objects that live in the workspace are divided into the * following categories, and are allocated separately: @@ -123,9 +123,18 @@ typedef enum { * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). * Their sizes depend on the cparams. These tables are 64-byte aligned. * - * - Aligned: these buffers are used for various purposes that require 4 byte - * alignment, but don't require any initialization before they're used. These - * buffers are each aligned to 64 bytes. + * - Init once: these buffers require to be initialized at least once before + * use. They should be used when we want to skip memory initialization + * while not triggering memory checkers (like Valgrind) when reading from + * from this memory without writing to it first. + * These buffers should be used carefully as they might contain data + * from previous compressions. + * Buffers are aligned to 64 bytes. + * + * - Aligned: these buffers don't require any initialization before they're + * used. The user of the buffer should make sure they write into a buffer + * location before reading from it. + * Buffers are aligned to 64 bytes. * * - Buffers: these buffers are used for various purposes that don't require * any alignment or initialization before they're used. This means they can @@ -137,8 +146,9 @@ typedef enum { * correctly packed into the workspace buffer. That order is: * * 1. Objects - * 2. Buffers - * 3. Aligned/Tables + * 2. Init once / Tables + * 3. Aligned / Tables + * 4. Buffers / Tables * * Attempts to reserve objects of different types out of order will fail. 
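 * Allocation-order sketch (illustrative; reserve-function names abbreviated,
 * see the reserve helpers below for exact signatures):
 *
 *     ZSTD_cwksp_reserve_object(ws, objSize);   // phase 1
 *     ZSTD_cwksp_reserve_table(ws, tblSize);    // phase 2 (tables stay valid later)
 *     ZSTD_cwksp_reserve_aligned(ws, aSize);    // phase 3
 *     ZSTD_cwksp_reserve_buffer(ws, bufSize);   // phase 4
 *     ZSTD_cwksp_reserve_object(ws, objSize);   // would fail: phases only advance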
*/ @@ -150,6 +160,7 @@ typedef struct { void* tableEnd; void* tableValidEnd; void* allocStart; + void* initOnceStart; BYTE allocFailed; int workspaceOversizedDuration; @@ -162,6 +173,7 @@ typedef struct { ***************************************/ MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws); +MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws); MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { (void)ws; @@ -171,14 +183,29 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { assert(ws->tableEnd <= ws->allocStart); assert(ws->tableValidEnd <= ws->allocStart); assert(ws->allocStart <= ws->workspaceEnd); + assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws)); + assert(ws->workspace <= ws->initOnceStart); +#if ZSTD_MEMORY_SANITIZER + { + intptr_t const offset = __msan_test_shadow(ws->initOnceStart, + (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart); + (void)offset; +#if defined(ZSTD_MSAN_PRINT) + if(offset!=-1) { + __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32); + } +#endif + assert(offset==-1); + }; +#endif } /** * Align must be a power of 2. */ -MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) { +MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) { size_t const mask = align - 1; - assert((align & mask) == 0); + assert(ZSTD_isPower2(align)); return (size + mask) & ~mask; } @@ -192,7 +219,7 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) { * to figure out how much space you need for the matchState tables. Everything * else is though. * - * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size(). + * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size(). */ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { if (size == 0) @@ -204,12 +231,16 @@ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { #endif } +MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) { + return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment)); +} + /** * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes. * Used to determine the number of bytes required for a given "aligned". */ -MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) { - return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES)); +MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) { + return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES); } /** @@ -217,14 +248,10 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) { * for internal purposes (currently only alignment). */ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { - /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes - * to align the beginning of tables section, as well as another n_2=[0, 63] bytes - * to align the beginning of the aligned section. - * - * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and - * aligneds being sized in multiples of 64 bytes. 
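
ZSTD_cwksp_align above relies on align being a power of two: with mask = align - 1, the expression (size + mask) & ~mask rounds size up to the next multiple of align. A quick self-contained check of the identity (hypothetical test harness, not part of the patch):

    #include <assert.h>
    #include <stddef.h>

    static size_t align_up(size_t size, size_t align)
    {
        size_t const mask = align - 1;   /* align must be a power of 2 */
        return (size + mask) & ~mask;
    }

    int main(void)
    {
        assert(align_up(0, 64)  == 0);
        assert(align_up(1, 64)  == 64);
        assert(align_up(64, 64) == 64);   /* already aligned: unchanged */
        assert(align_up(65, 64) == 128);
        return 0;
    }
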
+ /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES + * bytes to align the beginning of the tables section and the end of the buffers. */ - size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES; + size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2; return slackSpace; } @@ -236,11 +263,23 @@ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) { size_t const alignBytesMask = alignBytes - 1; size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask; - assert((alignBytes & alignBytesMask) == 0); - assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES); + assert(ZSTD_isPower2(alignBytes)); + assert(bytes < alignBytes); return bytes; } +/** + * Returns the initial value for allocStart, i.e. the position from + * which we start allocating downward from the end of the workspace. + */ +MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) +{ + char* endPtr = (char*)ws->workspaceEnd; + assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES)); + endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES); + return (void*)endPtr; +} + /** * Internal function. Do not use directly. * Reserves the given number of bytes within the aligned/buffer segment of the wksp, @@ -253,7 +292,7 @@ ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) { void* const alloc = (BYTE*)ws->allocStart - bytes; void* const bottom = ws->tableEnd; - DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining", + DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining", alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); ZSTD_cwksp_assert_internal_consistency(ws); assert(alloc >= bottom); @@ -281,27 +320,16 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase { assert(phase >= ws->phase); if (phase > ws->phase) { - /* Going from allocating objects to allocating buffers */ - if (ws->phase < ZSTD_cwksp_alloc_buffers && - phase >= ZSTD_cwksp_alloc_buffers) { + /* Going from allocating objects to allocating initOnce / tables */ + if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once && + phase >= ZSTD_cwksp_alloc_aligned_init_once) { ws->tableValidEnd = ws->objectEnd; - } + ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); - /* Going from allocating buffers to allocating aligneds/tables */ - if (ws->phase < ZSTD_cwksp_alloc_aligned && - phase >= ZSTD_cwksp_alloc_aligned) { - { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */ - size_t const bytesToAlign = - ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES); - DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign); - ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */ - RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign), - memory_allocation, "aligned phase - alignment initial allocation failed!"); - } { /* Align the start of the tables to 64 bytes.
Use [0, 63] bytes */ - void* const alloc = ws->objectEnd; + void *const alloc = ws->objectEnd; size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES); - void* const objectEnd = (BYTE*)alloc + bytesToAlign; + void *const objectEnd = (BYTE *) alloc + bytesToAlign; DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign); RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation, "table phase - alignment initial allocation failed!"); @@ -309,7 +337,9 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase ws->tableEnd = objectEnd; /* table area starts being empty */ if (ws->tableValidEnd < ws->tableEnd) { ws->tableValidEnd = ws->tableEnd; - } } } + } + } + } ws->phase = phase; ZSTD_cwksp_assert_internal_consistency(ws); } @@ -321,7 +351,7 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase */ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) { - return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd); + return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd); } /** @@ -348,7 +378,9 @@ ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase if (alloc) { alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { - __asan_unpoison_memory_region(alloc, bytes); + /* We need to keep the redzone poisoned while unpoisoning the bytes that + * are actually allocated. */ + __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE); } } #endif @@ -366,29 +398,64 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) /** * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). + * This memory has been initialized at least once in the past. + * This doesn't mean it has been initialized this time, and it might contain data from previous + * operations. + * The main usage is for algorithms that might need read access into uninitialized memory. + * The algorithm must maintain safety under these conditions and must make sure it doesn't + * leak any of the past data (directly or in side channels). */ -MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) +MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes) { - void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES), - ZSTD_cwksp_alloc_aligned); - assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); + size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES); + void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once); + assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); + if(ptr && ptr < ws->initOnceStart) { + /* We assume the memory following the current allocation is either: + * 1. Not usable as initOnce memory (end of workspace) + * 2. Another initOnce buffer that has been allocated before (and so was previously memset) + * 3. An ASAN redzone, in which case we don't want to write on it + * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart. + * Note that we assume here that MSAN and ASAN cannot run at the same time.
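
The zeroing step that follows maintains a simple invariant: every byte between ws->initOnceStart and the initial alloc start has been written at least once, so a fresh init-once reservation only needs to clear the prefix that lies below the current watermark. The bookkeeping in isolation (simplified, detached from the cwksp types):

    #include <string.h>

    /* `watermark` = lowest address ever handed out for init-once use.
     * Zero only the bytes of [ptr, ptr+bytes) below it, then lower it. */
    static void* reserve_init_once(char** watermark, char* ptr, size_t bytes)
    {
        if (ptr < *watermark) {
            size_t const fresh = (size_t)(*watermark - ptr);
            memset(ptr, 0, fresh < bytes ? fresh : bytes);
            *watermark = ptr;
        }
        return ptr;
    }
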
*/ + ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes)); + ws->initOnceStart = ptr; + } +#if ZSTD_MEMORY_SANITIZER + assert(__msan_test_shadow(ptr, bytes) == -1); +#endif + return ptr; +} + +/** + * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). + */ +MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes) +{ + void* const ptr = ZSTD_cwksp_reserve_internal(ws, + ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES), + ZSTD_cwksp_alloc_aligned); + assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); return ptr; } /** * Aligned on 64 bytes. These buffers have the special property that - * their values remain constrained, allowing us to re-use them without + * their values remain constrained, allowing us to reuse them without * memset()-ing them. */ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) { - const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned; + const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once; void* alloc; void* end; void* top; - if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) { - return NULL; + /* We can only start allocating tables after we are done reserving space for objects at the + * start of the workspace */ + if(ws->phase < phase) { + if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) { + return NULL; + } } alloc = ws->tableEnd; end = (BYTE *)alloc + bytes; @@ -413,7 +480,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) #endif assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); - assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); + assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); return alloc; } @@ -459,19 +526,41 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) return alloc; } +/** + * Same as ZSTD_cwksp_reserve_object(), but with alignment control. + * Note : should happen only once, at workspace first initialization + */ +MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment) +{ + size_t const mask = alignment - 1; + size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0; + void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus); + if (start == NULL) return NULL; + if (surplus == 0) return start; + assert(ZSTD_isPower2(alignment)); + return (void*)(((size_t)start + surplus) & ~mask); +} MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty"); #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) - /* To validate that the table re-use logic is sound, and that we don't + /* To validate that the table reuse logic is sound, and that we don't * access table space that we haven't cleaned, we re-"poison" the table - * space every time we mark it dirty. */ + * space every time we mark it dirty. + * Since tableValidEnd space and initOnce space may overlap, we don't poison + * the initOnce portion, as that would break its promise. This means that this poisoning + * check isn't always applied fully.
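
ZSTD_cwksp_reserve_object_aligned, defined above, gets stronger alignment out of a pointer-aligned allocator by over-allocating alignment - sizeof(void*) surplus bytes and rounding the start up; because the base is already pointer-aligned, the rounded pointer provably stays inside the block. The same arithmetic over a plain malloc (hypothetical stand-in; note the aligned pointer cannot be passed to free(), which is fine in the cwksp because the whole workspace is freed at once):

    #include <stdint.h>
    #include <stdlib.h>

    static void* alloc_aligned_in_block(size_t byteSize, size_t alignment /* power of 2 */)
    {
        size_t const mask = alignment - 1;
        size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
        void* const start = malloc(byteSize + surplus);  /* at least pointer-aligned */
        if (start == NULL) return NULL;
        /* start % alignment <= surplus, so rounding up stays inside the block */
        return (void*)(((uintptr_t)start + surplus) & ~(uintptr_t)mask);
    }
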
*/ { size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; assert(__msan_test_shadow(ws->objectEnd, size) == -1); - __msan_poison(ws->objectEnd, size); + if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { + __msan_poison(ws->objectEnd, size); + } else { + assert(ws->initOnceStart >= ws->objectEnd); + __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd); + } } #endif @@ -508,7 +597,8 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { * Invalidates table allocations. * All other allocations remain valid. */ -MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) { +MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) +{ DEBUGLOG(4, "cwksp: clearing tables!"); #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) @@ -534,13 +624,16 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: clearing!"); #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) - /* To validate that the context re-use logic is sound, and that we don't + /* To validate that the context reuse logic is sound, and that we don't * access stuff that this compression hasn't initialized, we re-"poison" - * the workspace (or at least the non-static, non-table parts of it) - * every time we start a new compression. */ + * the workspace except for the areas in which we expect memory reuse + * without initialization (objects, valid tables area and init once + * memory). */ { - size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd; - __msan_poison(ws->tableValidEnd, size); + if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { + size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd; + __msan_poison(ws->tableValidEnd, size); + } } #endif @@ -556,14 +649,23 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { #endif ws->tableEnd = ws->objectEnd; - ws->allocStart = ws->workspaceEnd; + ws->allocStart = ZSTD_cwksp_initialAllocStart(ws); ws->allocFailed = 0; - if (ws->phase > ZSTD_cwksp_alloc_buffers) { - ws->phase = ZSTD_cwksp_alloc_buffers; + if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) { + ws->phase = ZSTD_cwksp_alloc_aligned_init_once; } ZSTD_cwksp_assert_internal_consistency(ws); } +MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) { + return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); +} + +MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) { + return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace) + + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart); +} + /** * The provided workspace takes ownership of the buffer [start, start+size). 
* Any existing values in the workspace are ignored (the previously managed @@ -576,6 +678,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_c ws->workspaceEnd = (BYTE*)start + size; ws->objectEnd = ws->workspace; ws->tableValidEnd = ws->objectEnd; + ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); ws->phase = ZSTD_cwksp_alloc_objects; ws->isStatic = isStatic; ZSTD_cwksp_clear(ws); @@ -594,6 +697,11 @@ MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) { void *ptr = ws->workspace; DEBUGLOG(4, "cwksp: freeing workspace"); +#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) + if (ptr != NULL && customMem.customFree != NULL) { + __msan_unpoison(ptr, ZSTD_cwksp_sizeof(ws)); + } +#endif ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp)); ZSTD_customFree(ptr, customMem); } @@ -607,15 +715,6 @@ MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) { ZSTD_memset(src, 0, sizeof(ZSTD_cwksp)); } -MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) { - return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); -} - -MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) { - return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace) - + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart); -} - MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { return ws->allocFailed; } @@ -628,17 +727,11 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) * Returns if the estimated space needed for a wksp is within an acceptable limit of the * actual amount of space used. */ -MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws, - size_t const estimatedSpace, int resizedWorkspace) { - if (resizedWorkspace) { - /* Resized/newly allocated wksp should have exact bounds */ - return ZSTD_cwksp_used(ws) == estimatedSpace; - } else { - /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes - * than estimatedSpace. See the comments in zstd_cwksp.h for details. - */ - return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63); - } +MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) { + /* We have an alignment space between objects and tables, and between tables and buffers, so we can have up to twice + * the alignment bytes difference between estimation and actual usage */ + return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) && + ZSTD_cwksp_used(ws) <= estimatedSpace; } @@ -669,8 +762,4 @@ MEM_STATIC void ZSTD_cwksp_bump_oversized_duration( } } -#if defined (__cplusplus) -} -#endif - #endif /* ZSTD_CWKSP_H */ diff --git a/lib/compress/zstd_double_fast.c b/lib/compress/zstd_double_fast.c index c2dbd54c127..1a266e7d955 100644 --- a/lib/compress/zstd_double_fast.c +++ b/lib/compress/zstd_double_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -11,7 +11,11 @@ #include "zstd_compress_internal.h" #include "zstd_double_fast.h" -static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms, +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR + +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -47,7 +51,9 @@ static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms, } } } -static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -81,7 +87,7 @@ static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms, } } } -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, +void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) @@ -95,8 +101,9 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_doubleFast_noDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */) { ZSTD_compressionParameters const* cParams = &ms->cParams; @@ -135,9 +142,14 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( const BYTE* matchl0; /* the long match for ip */ const BYTE* matchs0; /* the short match for ip */ const BYTE* matchl1; /* the long match for ip1 */ + const BYTE* matchs0_safe; /* matchs0 or safe address */ const BYTE* ip = istart; /* the current position */ const BYTE* ip1; /* the next position */ + /* Array of ~random data; it has a low probability of matching real data. + * We load from here instead of from the tables when matchl0/matchl1 are + * invalid indices. Used to avoid unpredictable branches. */ + const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4}; DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic"); @@ -184,24 +196,29 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); - if (idxl0 > prefixLowestIndex) { + /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch. + * However, the expression below compiles into a conditional move. Since + * a match is unlikely and we only *branch* on idxl0 > prefixLowestIndex + * if there is a match, all branches become predictable.
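
The guard used in the hunk that follows, ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]), is a branchless select: both the real candidate address and a harmless dummy address are already computed, and the in-range comparison merely picks between them, ideally as a conditional move, so the subsequent load is always safe and the only branch taken is the unlikely "bytes actually matched" one. A portable sketch of the contract (the library's real implementation may use target-specific code to guarantee a cmov):

    typedef unsigned U32;
    typedef unsigned char BYTE;

    /* Return `candidate` when idx >= lowLimit, otherwise `safe`.
     * Written as a ternary over two ready pointers so that compilers
     * tend to lower it to a conditional move rather than a branch. */
    static const BYTE* select_addr(U32 idx, U32 lowLimit,
                                   const BYTE* candidate, const BYTE* safe)
    {
        return (idx >= lowLimit) ? candidate : safe;
    }

Note how the second half of the guard below, matchl0_safe == matchl0, re-checks validity only after the wide compare has succeeded, so that check branches only on real matches.
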
*/ + { const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]); + /* check prefix long match */ - if (MEM_read64(matchl0) == MEM_read64(ip)) { + if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) { mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8; offset = (U32)(ip-matchl0); while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */ goto _match_found; - } - } + } } idxl1 = hashLong[hl1]; matchl1 = base + idxl1; - if (idxs0 > prefixLowestIndex) { - /* check prefix short match */ - if (MEM_read32(matchs0) == MEM_read32(ip)) { - goto _search_next_long; - } + /* Same optimization as matchl0 above */ + matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]); + + /* check prefix short match */ + if(MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) { + goto _search_next_long; } if (ip1 >= nextStep) { @@ -235,21 +252,23 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( _search_next_long: - /* check prefix long +1 match */ - if (idxl1 > prefixLowestIndex) { - if (MEM_read64(matchl1) == MEM_read64(ip1)) { + /* short match found: let's check for a longer one */ + mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4; + offset = (U32)(ip - matchs0); + + /* check long match at +1 position */ + if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) { + size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8; + if (l1len > mLength) { + /* use the long match instead */ ip = ip1; - mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8; + mLength = l1len; offset = (U32)(ip-matchl1); - while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */ - goto _match_found; + matchs0 = matchl1; } } - /* if no long +1 match, explore the short match we found */ - mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4; - offset = (U32)(ip - matchs0); - while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */ + while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */ /* fall-through */ @@ -305,8 +324,9 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */) { @@ -327,7 +347,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - const ZSTD_matchState_t* const dms = ms->dictMatchState; + const ZSTD_MatchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams; const U32* const dictHashLong = dms->hashTable; const U32* const dictHashSmall = dms->chainTable; @@ -348,8 +368,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( if (ms->prefetchCDictTables) { size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32); - PREFETCH_AREA(dictHashLong, hashTableBytes) - PREFETCH_AREA(dictHashSmall, chainTableBytes) + PREFETCH_AREA(dictHashLong, hashTableBytes); + 
PREFETCH_AREA(dictHashSmall, chainTableBytes); } /* init */ @@ -384,7 +404,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ /* check repcode */ - if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; @@ -393,14 +413,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( goto _match_stored; } - if (matchIndexL > prefixLowestIndex) { + if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { /* check prefix long match */ - if (MEM_read64(matchLong) == MEM_read64(ip)) { - mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; - offset = (U32)(ip-matchLong); - while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ - goto _match_found; - } + mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; + offset = (U32)(ip-matchLong); + while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + goto _match_found; } else if (dictTagsMatchL) { /* check dictMatchState long match */ U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS; @@ -415,7 +433,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( } } if (matchIndexS > prefixLowestIndex) { - /* check prefix short match */ + /* short match candidate */ if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } @@ -445,14 +463,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( hashLong[hl3] = curr + 1; /* check prefix long +1 match */ - if (matchIndexL3 > prefixLowestIndex) { - if (MEM_read64(matchL3) == MEM_read64(ip+1)) { - mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; - ip++; - offset = (U32)(ip-matchL3); - while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ - goto _match_found; - } + if ((matchIndexL3 >= prefixLowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1))) { + mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; + ip++; + offset = (U32)(ip-matchL3); + while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ + goto _match_found; } else if (dictTagsMatchL3) { /* check dict long +1 match */ U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS; @@ -505,7 +521,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ? dictBase + repIndex2 - dictIndexDelta : base + repIndex2; - if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) + if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2)) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? 
dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; @@ -532,7 +548,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( #define ZSTD_GEN_DFAST_FN(dictMode, mls) \ static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \ - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ void const* src, size_t srcSize) \ { \ return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \ @@ -550,7 +566,7 @@ ZSTD_GEN_DFAST_FN(dictMatchState, 7) size_t ZSTD_compressBlock_doubleFast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const U32 mls = ms->cParams.minMatch; @@ -570,7 +586,7 @@ size_t ZSTD_compressBlock_doubleFast( size_t ZSTD_compressBlock_doubleFast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const U32 mls = ms->cParams.minMatch; @@ -589,8 +605,10 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState( } -static size_t ZSTD_compressBlock_doubleFast_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_doubleFast_extDict_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */) { @@ -641,7 +659,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( size_t mLength; hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ - if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */ + if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex)) & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; @@ -709,7 +727,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */ + if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 <= current2 - dictStartIndex)) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; @@ -739,7 +757,7 @@ ZSTD_GEN_DFAST_FN(extDict, 6) ZSTD_GEN_DFAST_FN(extDict, 7) size_t ZSTD_compressBlock_doubleFast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; @@ -756,3 +774,5 @@ size_t ZSTD_compressBlock_doubleFast_extDict( return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); } } + +#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */ diff --git a/lib/compress/zstd_double_fast.h b/lib/compress/zstd_double_fast.h index 6d8ee8c651f..cd562fea8ef 100644 --- a/lib/compress/zstd_double_fast.h +++ b/lib/compress/zstd_double_fast.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,29 +11,32 @@ #ifndef ZSTD_DOUBLE_FAST_H #define ZSTD_DOUBLE_FAST_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "../common/mem.h" /* U32 */ #include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR + +void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp); + size_t ZSTD_compressBlock_doubleFast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_doubleFast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_doubleFast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - -#if defined (__cplusplus) -} -#endif +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST ZSTD_compressBlock_doubleFast +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE ZSTD_compressBlock_doubleFast_dictMatchState +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT ZSTD_compressBlock_doubleFast_extDict +#else +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST NULL +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT NULL +#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */ #endif /* ZSTD_DOUBLE_FAST_H */ diff --git a/lib/compress/zstd_fast.c b/lib/compress/zstd_fast.c index 291173449bb..ee25bcbac8d 100644 --- a/lib/compress/zstd_fast.c +++ b/lib/compress/zstd_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,7 +11,9 @@ #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ #include "zstd_fast.h" -static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { @@ -43,10 +45,12 @@ static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms, size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */ ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); - } } } } + } } } } } -static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { @@ -80,7 +84,7 @@ static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms, } } } } } -void ZSTD_fillHashTable(ZSTD_matchState_t* ms, +void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) @@ -93,6 +97,50 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, } +typedef int (*ZSTD_match4Found) (const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit); + +static int +ZSTD_match4Found_cmov(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit) +{ + /* Array of ~random data; it has a low probability of matching real data. + * Load from here if the index is invalid. + * Used to avoid unpredictable branches. */ + static const BYTE dummy[] = {0x12,0x34,0x56,0x78}; + + /* currentIdx >= lowLimit is a (somewhat) unpredictable branch. + * However, the expression below compiles into a conditional move. + */ + const BYTE* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy); + /* Note: this used to be written as: return test1 && test2; + * Unfortunately, once inlined, these tests become branches, + * in which case it becomes critical that they are executed in the right order (test1 then test2). + * So we have to write these tests in a specific manner to ensure their ordering. + */ + if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) return 0; + /* force ordering of these tests, which matters once the function is inlined, as they become branches */ +#if defined(__GNUC__) + __asm__(""); +#endif + return matchIdx >= idxLowLimit; +} + +static int +ZSTD_match4Found_branch(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit) +{ + /* using a branch instead of a cmov, + * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true, + * i.e. almost all candidates are within range */ + U32 mval; + if (matchIdx >= idxLowLimit) { + mval = MEM_read32(matchAddress); + } else { + mval = MEM_read32(currentPtr) ^ 1; /* guaranteed to not match. */ + } + + return (MEM_read32(currentPtr) == mval); +} + + /** * If you squint hard enough (and ignore repcodes), the search operation at any * given position is broken into 4 stages: @@ -139,17 +187,17 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, * * This is also the work we do at the beginning to enter the loop initially.
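
The empty __asm__("") in ZSTD_match4Found_cmov above is a compiler-only ordering barrier: it emits no instructions, but GCC and Clang will not reorder the surrounding statements across it, which preserves the cheap-test-first ordering once the function is inlined and the two tests turn into branches. The pattern in isolation (GCC/Clang specific; a harmless no-op elsewhere; hypothetical helper, not from the patch):

    static int ordered_tests(const unsigned char* p, const unsigned char* q,
                             unsigned idx, unsigned lowLimit)
    {
        if (*p != *q) return 0;    /* test 1: cheap compare, must run first */
    #if defined(__GNUC__)
        __asm__("");               /* barrier: keeps test 2 from being hoisted */
    #endif
        return idx >= lowLimit;    /* test 2: only reached when test 1 held */
    }

The barrier is an optimization hint about evaluation order, not a correctness requirement: both orderings compute the same result, but the cheap-first order keeps the hot path predictable.
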
*/ -FORCE_INLINE_TEMPLATE size_t -ZSTD_compressBlock_fast_noDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_fast_noDict_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, - U32 const mls, U32 const hasStep) + U32 const mls, int useCmov) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; - /* support stepSize of 0 */ - size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2; + size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; /* min 2 */ const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); @@ -171,8 +219,7 @@ ZSTD_compressBlock_fast_noDict_generic( size_t hash0; /* hash for ip0 */ size_t hash1; /* hash for ip1 */ - U32 idx; /* match idx for ip0 */ - U32 mval; /* src value at match idx */ + U32 matchIdx; /* match idx for ip0 */ U32 offcode; const BYTE* match0; @@ -185,6 +232,7 @@ ZSTD_compressBlock_fast_noDict_generic( size_t step; const BYTE* nextStep; const size_t kStepIncr = (1 << (kSearchStrength - 1)); + const ZSTD_match4Found matchFound = useCmov ? ZSTD_match4Found_cmov : ZSTD_match4Found_branch; DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); ip0 += (ip0 == prefixStart); @@ -213,7 +261,7 @@ ZSTD_compressBlock_fast_noDict_generic( hash0 = ZSTD_hashPtr(ip0, hlog, mls); hash1 = ZSTD_hashPtr(ip1, hlog, mls); - idx = hashTable[hash0]; + matchIdx = hashTable[hash0]; do { /* load repcode match for ip[2]*/ @@ -233,35 +281,25 @@ ZSTD_compressBlock_fast_noDict_generic( offcode = REPCODE1_TO_OFFBASE; mLength += 4; - /* First write next hash table entry; we've already calculated it. - * This write is known to be safe because the ip1 is before the + /* Write next hash table entry: it's already calculated. + * This write is known to be safe because ip1 is before the * repcode (ip2). */ hashTable[hash1] = (U32)(ip1 - base); goto _match; } - /* load match for ip[0] */ - if (idx >= prefixStartIndex) { - mval = MEM_read32(base + idx); - } else { - mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ - } - - /* check match at ip[0] */ - if (MEM_read32(ip0) == mval) { - /* found a match! */ - - /* First write next hash table entry; we've already calculated it. - * This write is known to be safe because the ip1 == ip0 + 1, so - * we know we will resume searching after ip1 */ + if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) { + /* Write next hash table entry (it's already calculated). + * This write is known to be safe because the ip1 == ip0 + 1, + * so searching will resume after ip1 */ hashTable[hash1] = (U32)(ip1 - base); goto _offset; } /* lookup ip[1] */ - idx = hashTable[hash1]; + matchIdx = hashTable[hash1]; /* hash ip[2] */ hash0 = hash1; @@ -276,36 +314,19 @@ ZSTD_compressBlock_fast_noDict_generic( current0 = (U32)(ip0 - base); hashTable[hash0] = current0; - /* load match for ip[0] */ - if (idx >= prefixStartIndex) { - mval = MEM_read32(base + idx); - } else { - mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ - } - - /* check match at ip[0] */ - if (MEM_read32(ip0) == mval) { - /* found a match! 
*/ - - /* first write next hash table entry; we've already calculated it */ + if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) { + /* Write next hash table entry, since it's already calculated */ if (step <= 4) { - /* We need to avoid writing an index into the hash table >= the - * position at which we will pick up our searching after we've - * taken this match. - * - * The minimum possible match has length 4, so the earliest ip0 - * can be after we take this match will be the current ip0 + 4. - * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely - * write this position. - */ + /* Avoid writing an index if it's >= position where search will resume. + * The minimum possible match has length 4, so search can resume at ip0 + 4. + */ hashTable[hash1] = (U32)(ip1 - base); } - goto _offset; } /* lookup ip[1] */ - idx = hashTable[hash1]; + matchIdx = hashTable[hash1]; /* hash ip[2] */ hash0 = hash1; @@ -327,7 +348,7 @@ ZSTD_compressBlock_fast_noDict_generic( } while (ip3 < ilimit); _cleanup: - /* Note that there are probably still a couple positions we could search. + /* Note that there are probably still a couple positions one could search. * However, it seems to be a meaningful performance hit to try to search * them. So let's not. */ @@ -356,7 +377,7 @@ ZSTD_compressBlock_fast_noDict_generic( _offset: /* Requires: ip0, idx */ /* Compute the offset code. */ - match0 = base + idx; + match0 = base + matchIdx; rep_offset2 = rep_offset1; rep_offset1 = (U32)(ip0-match0); offcode = OFFSET_TO_OFFBASE(rep_offset1); @@ -401,12 +422,12 @@ ZSTD_compressBlock_fast_noDict_generic( goto _start; } -#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \ - static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \ - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ +#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov) \ + static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov( \ + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ void const* src, size_t srcSize) \ { \ - return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \ + return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \ } ZSTD_GEN_FAST_FN(noDict, 4, 1) @@ -420,13 +441,15 @@ ZSTD_GEN_FAST_FN(noDict, 6, 0) ZSTD_GEN_FAST_FN(noDict, 7, 0) size_t ZSTD_compressBlock_fast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - U32 const mls = ms->cParams.minMatch; + U32 const mml = ms->cParams.minMatch; + /* use cmov when "candidate in range" branch is likely unpredictable */ + int const useCmov = ms->cParams.windowLog < 19; assert(ms->dictMatchState == NULL); - if (ms->cParams.targetLength > 1) { - switch(mls) + if (useCmov) { + switch(mml) { default: /* includes case 3 */ case 4 : @@ -439,7 +462,8 @@ size_t ZSTD_compressBlock_fast( return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); } } else { - switch(mls) + /* use a branch instead */ + switch(mml) { default: /* includes case 3 */ case 4 : @@ -451,13 +475,13 @@ size_t ZSTD_compressBlock_fast( case 7 : return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); } - } } FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_fast_dictMatchState_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* 
seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -476,7 +500,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - const ZSTD_matchState_t* const dms = ms->dictMatchState; + const ZSTD_MatchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; const U32* const dictHashTable = dms->hashTable; const U32 dictStartIndex = dms->window.dictLimit; @@ -502,7 +526,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( if (ms->prefetchCDictTables) { size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); - PREFETCH_AREA(dictHashTable, hashTableBytes) + PREFETCH_AREA(dictHashTable, hashTableBytes); } /* init */ @@ -540,8 +564,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); hashTable[hash0] = curr; /* update hash table */ - if (((U32) ((prefixStartIndex - 1) - repIndex) >= - 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ + if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; @@ -574,8 +597,8 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( } } - if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) { - /* found a regular match */ + if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex)) { + /* found a regular match of size >= 4 */ U32 const offset = (U32) (ip0 - match); mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; while (((ip0 > anchor) & (match > prefixStart)) @@ -625,7 +648,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase - dictIndexDelta + repIndex2 : base + repIndex2; - if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) + if ( (ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) && (MEM_read32(repMatch2) == MEM_read32(ip0))) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; @@ -661,7 +684,7 @@ ZSTD_GEN_FAST_FN(dictMatchState, 6, 0) ZSTD_GEN_FAST_FN(dictMatchState, 7, 0) size_t ZSTD_compressBlock_fast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; @@ -681,8 +704,10 @@ size_t ZSTD_compressBlock_fast_dictMatchState( } -static size_t ZSTD_compressBlock_fast_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_fast_extDict_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -917,7 +942,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( while (ip0 <= ilimit) { U32 const repIndex2 = (U32)(ip0-base) - offset_2; const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */ + if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 > 0)) && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; @@ -940,7 +965,7 @@ ZSTD_GEN_FAST_FN(extDict, 6, 0) ZSTD_GEN_FAST_FN(extDict, 7, 0) size_t ZSTD_compressBlock_fast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; diff --git a/lib/compress/zstd_fast.h b/lib/compress/zstd_fast.h index 3bfeb2c5f83..216821ac33b 100644 --- a/lib/compress/zstd_fast.h +++ b/lib/compress/zstd_fast.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,28 +11,20 @@ #ifndef ZSTD_FAST_H #define ZSTD_FAST_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "../common/mem.h" /* U32 */ #include "zstd_compress_internal.h" -void ZSTD_fillHashTable(ZSTD_matchState_t* ms, +void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp); size_t ZSTD_compressBlock_fast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_fast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_fast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -#if defined (__cplusplus) -} -#endif - #endif /* ZSTD_FAST_H */ diff --git a/lib/compress/zstd_lazy.c b/lib/compress/zstd_lazy.c index 3e2ee1dda01..18b7b43948f 100644 --- a/lib/compress/zstd_lazy.c +++ b/lib/compress/zstd_lazy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,13 +12,21 @@ #include "zstd_lazy.h" #include "../common/bits.h" /* ZSTD_countTrailingZeros64 */ +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) + +#define kLazySkippingStep 8 + /*-************************************* * Binary Tree search ***************************************/ -static void -ZSTD_updateDUBT(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_updateDUBT(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend, U32 mls) { @@ -61,8 +69,9 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms, * sort one already inserted but unsorted position * assumption : curr >= btlow == (curr - btmask) * doesn't fail */ -static void -ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_insertDUBT1(const ZSTD_MatchState_t* ms, U32 curr, const BYTE* inputEnd, U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode) @@ -150,9 +159,10 @@ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, } -static size_t -ZSTD_DUBT_findBetterDictMatch ( - const ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_DUBT_findBetterDictMatch ( + const ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, size_t bestLength, @@ -160,7 +170,7 @@ ZSTD_DUBT_findBetterDictMatch ( U32 const mls, const ZSTD_dictMode_e dictMode) { - const ZSTD_matchState_t * const dms = ms->dictMatchState; + const ZSTD_MatchState_t * const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dmsCParams = &dms->cParams; const U32 * const dictHashTable = dms->hashTable; U32 const hashLog = dmsCParams->hashLog; @@ -228,8 +238,9 @@ ZSTD_DUBT_findBetterDictMatch ( } -static size_t -ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, const BYTE* const ip, const 
BYTE* const iend, size_t* offBasePtr, U32 const mls, @@ -379,8 +390,9 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ -FORCE_INLINE_TEMPLATE size_t -ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_BtFindBestMatch( ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offBasePtr, const U32 mls /* template */, @@ -396,7 +408,7 @@ ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, * Dedicated dict search ***********************************/ -void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip) +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip) { const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); @@ -515,7 +527,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B */ FORCE_INLINE_TEMPLATE size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts, - const ZSTD_matchState_t* const dms, + const ZSTD_MatchState_t* const dms, const BYTE* const ip, const BYTE* const iLimit, const BYTE* const prefixStart, const U32 curr, const U32 dictLimit, const size_t ddsIdx) { @@ -615,10 +627,12 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb /* Update chains up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ -FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( - ZSTD_matchState_t* ms, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_insertAndFindFirstIndex_internal( + ZSTD_MatchState_t* ms, const ZSTD_compressionParameters* const cParams, - const BYTE* ip, U32 const mls) + const BYTE* ip, U32 const mls, U32 const lazySkipping) { U32* const hashTable = ms->hashTable; const U32 hashLog = cParams->hashLog; @@ -633,21 +647,25 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; hashTable[h] = idx; idx++; + /* Stop inserting every position when in the lazy skipping mode. */ + if (lazySkipping) + break; } ms->nextToUpdate = target; return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; } -U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { +U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip) { const ZSTD_compressionParameters* const cParams = &ms->cParams; - return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch); + return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0); } /* inlining is important to hardwire a hot branch (template emulation) */ FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_HcFindBestMatch( - ZSTD_matchState_t* ms, + ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode) @@ -671,7 +689,7 @@ size_t ZSTD_HcFindBestMatch( U32 nbAttempts = 1U << cParams->searchLog; size_t ml=4-1; - const ZSTD_matchState_t* const dms = ms->dictMatchState; + const ZSTD_MatchState_t* const dms = ms->dictMatchState; const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch ? 
dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0; const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch @@ -685,7 +703,7 @@ size_t ZSTD_HcFindBestMatch( } /* HC4 match finder */ - matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls); + matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping); for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) { size_t currentMl=0; @@ -758,8 +776,6 @@ size_t ZSTD_HcFindBestMatch( * (SIMD) Row-based matchfinder ***********************************/ /* Constants for row-based hash */ -#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */ -#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ #define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1) #define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */ @@ -775,39 +791,15 @@ MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) { return ZSTD_countTrailingZeros64(val); } -/* ZSTD_rotateRight_*(): - * Rotates a bitfield to the right by "count" bits. - * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts - */ -FORCE_INLINE_TEMPLATE -U64 ZSTD_rotateRight_U64(U64 const value, U32 count) { - assert(count < 64); - count &= 0x3F; /* for fickle pattern recognition */ - return (value >> count) | (U64)(value << ((0U - count) & 0x3F)); -} - -FORCE_INLINE_TEMPLATE -U32 ZSTD_rotateRight_U32(U32 const value, U32 count) { - assert(count < 32); - count &= 0x1F; /* for fickle pattern recognition */ - return (value >> count) | (U32)(value << ((0U - count) & 0x1F)); -} - -FORCE_INLINE_TEMPLATE -U16 ZSTD_rotateRight_U16(U16 const value, U32 count) { - assert(count < 16); - count &= 0x0F; /* for fickle pattern recognition */ - return (value >> count) | (U16)(value << ((0U - count) & 0x0F)); -} - /* ZSTD_row_nextIndex(): * Returns the next index to insert at within a tagTable row, and updates the "head" - * value to reflect the update. Essentially cycles backwards from [0, {entries per row}) + * value to reflect the update. Essentially cycles backwards from [1, {entries per row}) */ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) { - U32 const next = (*tagRow - 1) & rowMask; - *tagRow = (BYTE)next; - return next; + U32 next = (*tagRow-1) & rowMask; + next += (next == 0) ? rowMask : 0; /* skip first position */ + *tagRow = (BYTE)next; + return next; } /* ZSTD_isAligned(): @@ -821,7 +813,7 @@ MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) { /* ZSTD_row_prefetch(): * Performs prefetching for the hashTable and tagTable at a given row. */ -FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) { +FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) { PREFETCH_L1(hashTable + relRow); if (rowLog >= 5) { PREFETCH_L1(hashTable + relRow + 16); @@ -840,18 +832,20 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* ta * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, * but not beyond iLimit. 
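
With the tag table now typed as rows of BYTE, byte 0 of each row stores that row's "head" counter, which is why the reworked ZSTD_row_nextIndex above cycles through [1, rowEntries) and skips slot 0. A standalone model of that cycle (rowMask = rowEntries - 1, where rowEntries is a power of two):

    typedef unsigned char BYTE;
    typedef unsigned U32;

    /* Walk the insertion slot backwards through rowMask..1, wrapping
     * past 0 so the head byte (slot 0) is never overwritten by a tag. */
    static U32 row_next_index(BYTE* head, U32 rowMask)
    {
        U32 next = (U32)(*head - 1) & rowMask;
        next += (next == 0) ? rowMask : 0;   /* 0 -> rowMask: skip slot 0 */
        *head = (BYTE)next;
        return next;
    }
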
*/ -FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, const BYTE* base, U32 const rowLog, U32 const mls, U32 idx, const BYTE* const iLimit) { U32 const* const hashTable = ms->hashTable; - U16 const* const tagTable = ms->tagTable; + BYTE const* const tagTable = ms->tagTable; U32 const hashLog = ms->rowHashLog; U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1); U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch); for (; idx < lim; ++idx) { - U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt); U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash; @@ -866,12 +860,15 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable. */ -FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable, - U16 const* tagTable, BYTE const* base, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable, + BYTE const* tagTable, BYTE const* base, U32 idx, U32 const hashLog, - U32 const rowLog, U32 const mls) + U32 const rowLog, U32 const mls, + U64 const hashSalt) { - U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt); U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK]; @@ -883,28 +880,29 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTab /* ZSTD_row_update_internalImpl(): * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. */ -FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms, - U32 updateStartIdx, U32 const updateEndIdx, - U32 const mls, U32 const rowLog, - U32 const rowMask, U32 const useCache) +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms, + U32 updateStartIdx, U32 const updateEndIdx, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) { U32* const hashTable = ms->hashTable; - U16* const tagTable = ms->tagTable; + BYTE* const tagTable = ms->tagTable; U32 const hashLog = ms->rowHashLog; const BYTE* const base = ms->window.base; DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx); for (; updateStartIdx < updateEndIdx; ++updateStartIdx) { - U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls) - : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + U32 const hash = useCache ? 
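The switch from ZSTD_hashPtr() to ZSTD_hashPtrSalted() folds a per-reset salt into the row hash. The sketch below shows the idea only, with an invented mixer (hashSalted() is not the real zstd hash): once the salt changes, stale entries left in a reused table hash to different rows, so phantom matches from the previous compression job are rarely even visited.

#include <stdint.h>
#include <string.h>

/* Illustrative salted hash: the salt is XORed in before mixing, so changing
 * the salt re-shuffles which row every input position maps to. */
static uint32_t hashSalted(const void* p, uint64_t const salt)
{
    uint64_t x;
    memcpy(&x, p, sizeof(x));        /* illustrative 8-byte read; zstd reads mls bytes */
    x ^= salt;                       /* per-context-reset salt */
    x *= 0x9E3779B185EBCA87ULL;      /* Fibonacci-style multiplier */
    return (uint32_t)(x >> 32);
}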
ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt) + : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt); U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; U32* const row = hashTable + relRow; - BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte. - Explicit cast allows us to get exact desired position within each row */ + BYTE* tagRow = tagTable + relRow; U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); - assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls)); - ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK; + assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt)); + tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK; row[pos] = updateStartIdx; } } @@ -913,9 +911,11 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms, * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. * Skips sections of long matches as is necessary. */ -FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip, - U32 const mls, U32 const rowLog, - U32 const rowMask, U32 const useCache) +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, const BYTE* ip, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) { U32 idx = ms->nextToUpdate; const BYTE* const base = ms->window.base; @@ -946,7 +946,7 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary * processing. 
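For context, the hash cache consumed above is a small ring that runs ahead of the search. The sketch assumes ZSTD_ROW_HASH_CACHE_SIZE == 8 and uses an invented computeAndPrefetch() stand-in for the hash-plus-ZSTD_row_prefetch() pair:

typedef unsigned int U32;

enum { CACHE_SIZE = 8, CACHE_MASK = CACHE_SIZE - 1 };

static U32 computeAndPrefetch(U32 idx) { return idx * 2654435761u; } /* stub hash */

/* Consuming the hash for position idx simultaneously computes (and, in the
 * real code, prefetches the rows for) position idx + CACHE_SIZE, keeping
 * memory loads roughly 8 positions ahead of the match search. */
static U32 nextCachedHash(U32* cache, U32 idx)
{
    U32 const newHash = computeAndPrefetch(idx + CACHE_SIZE);
    U32 const hash = cache[idx & CACHE_MASK];
    cache[idx & CACHE_MASK] = newHash;
    return hash;
}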
*/ -void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { +void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip) { const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); const U32 rowMask = (1u << rowLog) - 1; const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */); @@ -1050,6 +1050,44 @@ ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag } } #endif +#if defined(ZSTD_ARCH_RISCV_RVV) && (__riscv_xlen == 64) +FORCE_INLINE_TEMPLATE ZSTD_VecMask +ZSTD_row_getRVVMask(int rowEntries, const BYTE* const src, const BYTE tag, const U32 head) +{ + ZSTD_VecMask matches; + size_t vl; + + if (rowEntries == 16) { + vl = __riscv_vsetvl_e8m1(16); + { + vuint8m1_t chunk = __riscv_vle8_v_u8m1(src, vl); + vbool8_t mask = __riscv_vmseq_vx_u8m1_b8(chunk, tag, vl); + vuint16m1_t mask_u16 = __riscv_vreinterpret_v_b8_u16m1(mask); + matches = __riscv_vmv_x_s_u16m1_u16(mask_u16); + return ZSTD_rotateRight_U16((U16)matches, head); + } + + } else if (rowEntries == 32) { + vl = __riscv_vsetvl_e8m2(32); + { + vuint8m2_t chunk = __riscv_vle8_v_u8m2(src, vl); + vbool4_t mask = __riscv_vmseq_vx_u8m2_b4(chunk, tag, vl); + vuint32m1_t mask_u32 = __riscv_vreinterpret_v_b4_u32m1(mask); + matches = __riscv_vmv_x_s_u32m1_u32(mask_u32); + return ZSTD_rotateRight_U32((U32)matches, head); + } + } else { /* rowEntries == 64 */ + vl = __riscv_vsetvl_e8m4(64); + { + vuint8m4_t chunk = __riscv_vle8_v_u8m4(src, vl); + vbool2_t mask = __riscv_vmseq_vx_u8m4_b2(chunk, tag, vl); + vuint64m1_t mask_u64 = __riscv_vreinterpret_v_b2_u64m1(mask); + matches = __riscv_vmv_x_s_u64m1_u64(mask_u64); + return ZSTD_rotateRight_U64(matches, head); + } + } +} +#endif /* Returns a ZSTD_VecMask (U64) that has the nth group (determined by * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag" @@ -1060,7 +1098,7 @@ ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag FORCE_INLINE_TEMPLATE ZSTD_VecMask ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries) { - const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET; + const BYTE* const src = tagRow; assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8); @@ -1069,16 +1107,22 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGr return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped); -#else /* SW or NEON-LE */ +#elif defined(ZSTD_ARCH_RISCV_RVV) && (__riscv_xlen == 64) -# if defined(ZSTD_ARCH_ARM_NEON) + return ZSTD_row_getRVVMask(rowEntries, src, tag, headGrouped); + +#else + +#if defined(ZSTD_ARCH_ARM_NEON) /* This NEON path only works for little endian - otherwise use SWAR below */ if (MEM_isLittleEndian()) { return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped); } + +#endif /* SWAR */ { const size_t chunkSize = sizeof(size_t); const size_t shiftAmount = ((chunkSize * 8) - chunkSize); const size_t xFF = ~((size_t)0); const size_t x01 = xFF / 0xFF; @@ -1123,29 +1167,30 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGr /* The high-level approach of the SIMD row based match finder is as follows: * - Figure out where to insert the new entry: - * - Generate a hash from a byte along with an additional 1-byte "short hash".
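The SWAR fallback opened above relies on a branch-free byte-equality trick. A self-contained 64-bit sketch of it (matchMask8() is an illustrative name; the real loop also rotates the result by the row head and handles 4-byte words):

#include <stdint.h>
#include <string.h>

/* XOR the 8 tag bytes with the tag broadcast to every lane; equal bytes become
 * 0x00. Pre-setting the high bits before the subtraction keeps borrows from
 * crossing byte lanes, so the nonzero test is exact per byte. */
static uint64_t matchMask8(const unsigned char* row, unsigned char tag)
{
    uint64_t const x01 = 0x0101010101010101ULL;
    uint64_t const x80 = 0x8080808080808080ULL;
    uint64_t chunk;
    memcpy(&chunk, row, sizeof(chunk));
    chunk ^= x01 * (uint64_t)tag;                  /* matching bytes -> 0x00 */
    chunk = (((chunk | x80) - x01) | chunk) & x80; /* high bit per NONzero byte */
    return chunk ^ x80;                            /* flip: high bit per match */
}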
The additional byte is our "tag" - * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines + * - Generate a hash for current input position and split it into a one byte of tag and `rowHashLog` bits of index. + * - The hash is salted by a value that changes on every context reset, so when the same table is used + * we will avoid collisions that would otherwise slow us down by introducing phantom matches. + * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines * which row to insert into. - * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can - * be considered as a circular buffer with a "head" index that resides in the tagTable. - * - Also insert the "tag" into the equivalent row and position in the tagTable. - * - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry. - * The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively, - * for alignment/performance reasons, leaving some bytes unused. - * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and + * - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can + * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes + * per row). + * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and * generate a bitfield that we can cycle through to check the collisions in the hash table. * - Pick the longest match. + * - Insert the tag into the equivalent row and position in the tagTable. */ FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_RowFindBestMatch( - ZSTD_matchState_t* ms, + ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode, const U32 rowLog) { U32* const hashTable = ms->hashTable; - U16* const tagTable = ms->tagTable; + BYTE* const tagTable = ms->tagTable; U32* const hashCache = ms->hashCache; const U32 hashLog = ms->rowHashLog; const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -1164,11 +1209,13 @@ size_t ZSTD_RowFindBestMatch( const U32 rowMask = rowEntries - 1; const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */ const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries); + const U64 hashSalt = ms->hashSalt; U32 nbAttempts = 1U << cappedSearchLog; size_t ml=4-1; + U32 hash; /* DMS/DDS variables that may be referenced laster */ - const ZSTD_matchState_t* const dms = ms->dictMatchState; + const ZSTD_MatchState_t* const dms = ms->dictMatchState; /* Initialize the following variables to satisfy static analyzer */ size_t ddsIdx = 0; @@ -1189,7 +1236,7 @@ size_t ZSTD_RowFindBestMatch( if (dictMode == ZSTD_dictMatchState) { /* Prefetch DMS rows */ U32* const dmsHashTable = dms->hashTable; - U16* const dmsTagTable = dms->tagTable; + BYTE* const dmsTagTable = dms->tagTable; U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls); U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK; @@ -1199,9 +1246,19 @@ size_t ZSTD_RowFindBestMatch( } /* Update the hashTable and tagTable up to (but not including) ip */ - ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 
/* useCache */); + if (!ms->lazySkipping) { + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */); + hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt); + } else { + /* Stop inserting every position when in the lazy skipping mode. + * The hash cache is also not kept up to date in this mode. + */ + hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt); + ms->nextToUpdate = curr; + } + ms->hashSaltEntropy += hash; /* collect salt entropy */ + { /* Get the hash for ip, compute the appropriate row */ - U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls); U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK; U32* const row = hashTable + relRow; @@ -1213,9 +1270,10 @@ size_t ZSTD_RowFindBestMatch( ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries); /* Cycle through the matches and prefetch */ - for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) { + for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) { U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; U32 const matchIndex = row[matchPos]; + if(matchPos == 0) continue; assert(numMatches < rowEntries); if (matchIndex < lowLimit) break; @@ -1225,13 +1283,14 @@ size_t ZSTD_RowFindBestMatch( PREFETCH_L1(dictBase + matchIndex); } matchBuffer[numMatches++] = matchIndex; + --nbAttempts; } /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop in ZSTD_row_update_internal() at the next search. */ { U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); - tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag; + tagRow[pos] = (BYTE)tag; row[pos] = ms->nextToUpdate++; } @@ -1282,13 +1341,15 @@ size_t ZSTD_RowFindBestMatch( size_t currMatch = 0; ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries); - for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) { + for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) { U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; U32 const matchIndex = dmsRow[matchPos]; + if(matchPos == 0) continue; if (matchIndex < dmsLowestIndex) break; PREFETCH_L1(dmsBase + matchIndex); matchBuffer[numMatches++] = matchIndex; + --nbAttempts; } /* Return the longest match */ @@ -1317,17 +1378,13 @@ size_t ZSTD_RowFindBestMatch( } -typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); - /** - * This struct contains the functions necessary for lazy to search. - * Currently, that is only searchMax. However, it is still valuable to have the - * VTable because this makes it easier to add more functions to the VTable later. + * Generate search functions templated on (dictMode, mls, rowLog). + * These functions are outlined for code size & compilation time. + * ZSTD_searchMax() dispatches to the correct implementation function. * * TODO: The start of the search function involves loading and calculating a - * bunch of constants from the ZSTD_matchState_t. These computations could be + * bunch of constants from the ZSTD_MatchState_t. These computations could be * done in an initialization function, and saved somewhere in the match state. 
* Then we could pass a pointer to the saved state instead of the match state, * and avoid duplicate computations. @@ -1342,39 +1399,36 @@ typedef size_t (*searchMax_f)( * the single segment loop. It should go in searchMax instead of its own * function to avoid having multiple virtual function calls per search. */ -typedef struct { - searchMax_f searchMax; -} ZSTD_LazyVTable; - -#define GEN_ZSTD_BT_VTABLE(dictMode, mls) \ - static size_t ZSTD_BtFindBestMatch_##dictMode##_##mls( \ - ZSTD_matchState_t* ms, \ - const BYTE* ip, const BYTE* const iLimit, \ - size_t* offBasePtr) \ - { \ - assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode);\ - } \ - static const ZSTD_LazyVTable ZSTD_BtVTable_##dictMode##_##mls = { \ - ZSTD_BtFindBestMatch_##dictMode##_##mls \ - }; -#define GEN_ZSTD_HC_VTABLE(dictMode, mls) \ - static size_t ZSTD_HcFindBestMatch_##dictMode##_##mls( \ - ZSTD_matchState_t* ms, \ +#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls +#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls +#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog + +#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE + +#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \ + ZSTD_MatchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offBasePtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \ + } \ + +#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \ + ZSTD_MatchState_t* ms, \ const BYTE* ip, const BYTE* const iLimit, \ size_t* offsetPtr) \ { \ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ } \ - static const ZSTD_LazyVTable ZSTD_HcVTable_##dictMode##_##mls = { \ - ZSTD_HcFindBestMatch_##dictMode##_##mls \ - }; -#define GEN_ZSTD_ROW_VTABLE(dictMode, mls, rowLog) \ - static size_t ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog( \ - ZSTD_matchState_t* ms, \ +#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \ + ZSTD_MatchState_t* ms, \ const BYTE* ip, const BYTE* const iLimit, \ size_t* offsetPtr) \ { \ @@ -1382,9 +1436,6 @@ typedef struct { assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \ return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \ } \ - static const ZSTD_LazyVTable ZSTD_RowVTable_##dictMode##_##mls##_##rowLog = { \ - ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog \ - }; #define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \ X(dictMode, mls, 4) \ @@ -1407,87 +1458,107 @@ typedef struct { X(__VA_ARGS__, dictMatchState) \ X(__VA_ARGS__, dedicatedDictSearch) -/* Generate Row VTables for each combination of (dictMode, mls, rowLog) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_VTABLE) -/* Generate Binary Tree VTables for each combination of (dictMode, mls) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_VTABLE) -/* Generate Hash Chain VTables for each combination of (dictMode, mls) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE) - -#define GEN_ZSTD_BT_VTABLE_ARRAY(dictMode) \ - { \ - &ZSTD_BtVTable_##dictMode##_4, \ - 
&ZSTD_BtVTable_##dictMode##_5, \ - &ZSTD_BtVTable_##dictMode##_6 \ - } - -#define GEN_ZSTD_HC_VTABLE_ARRAY(dictMode) \ - { \ - &ZSTD_HcVTable_##dictMode##_4, \ - &ZSTD_HcVTable_##dictMode##_5, \ - &ZSTD_HcVTable_##dictMode##_6 \ - } +/* Generate row search fns for each combination of (dictMode, mls, rowLog) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN) +/* Generate binary Tree search fns for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN) +/* Generate hash chain search fns for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN) -#define GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, mls) \ - { \ - &ZSTD_RowVTable_##dictMode##_##mls##_4, \ - &ZSTD_RowVTable_##dictMode##_##mls##_5, \ - &ZSTD_RowVTable_##dictMode##_##mls##_6 \ - } - -#define GEN_ZSTD_ROW_VTABLE_ARRAY(dictMode) \ - { \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 4), \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 5), \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 6) \ - } +typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; -#define GEN_ZSTD_VTABLE_ARRAY(X) \ - { \ - X(noDict), \ - X(extDict), \ - X(dictMatchState), \ - X(dedicatedDictSearch) \ +#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \ + case mls: \ + return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); +#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \ + case mls: \ + return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); +#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + case rowLog: \ + return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr); + +#define ZSTD_SWITCH_MLS(X, dictMode) \ + switch (mls) { \ + ZSTD_FOR_EACH_MLS(X, dictMode) \ } -/* ******************************* -* Common parser - lazy strategy -*********************************/ -typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; +#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \ + case mls: \ + switch (rowLog) { \ + ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \ + } \ + ZSTD_UNREACHABLE; \ + break; + +#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \ + switch (searchMethod) { \ + case search_hashChain: \ + ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \ + break; \ + case search_binaryTree: \ + ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \ + break; \ + case search_rowHash: \ + ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \ + break; \ + } \ + ZSTD_UNREACHABLE; /** - * This table is indexed first by the four ZSTD_dictMode_e values, and then - * by the two searchMethod_e values. NULLs are placed for configurations - * that should never occur (extDict modes go to the other implementation - * below and there is no DDSS for binary tree search yet). + * Searches for the longest match at @p ip. + * Dispatches to the correct implementation function based on the + * (searchMethod, dictMode, mls, rowLog). We use switch statements + * here instead of using an indirect function call through a function + * pointer because after Spectre and Meltdown mitigations, indirect + * function calls can be very costly, especially in the kernel. + * + * NOTE: dictMode and searchMethod should be templated, so those switch + * statements should be optimized out. Only the mls & rowLog switches + * should be left. + * + * @param ms The match state. + * @param ip The position to search at. + * @param iend The end of the input data. 
+ * @param[out] offsetPtr Stores the match offset into this pointer. + * @param mls The minimum search length, in the range [4, 6]. + * @param rowLog The row log (if applicable), in the range [4, 6]. + * @param searchMethod The search method to use (templated). + * @param dictMode The dictMode (templated). + * + * @returns The length of the longest match found, or < mls if no match is found. + * If a match is found its offset is stored in @p offsetPtr. */ - -static ZSTD_LazyVTable const* -ZSTD_selectLazyVTable(ZSTD_matchState_t const* ms, searchMethod_e searchMethod, ZSTD_dictMode_e dictMode) +FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax( + ZSTD_MatchState_t* ms, + const BYTE* ip, + const BYTE* iend, + size_t* offsetPtr, + U32 const mls, + U32 const rowLog, + searchMethod_e const searchMethod, + ZSTD_dictMode_e const dictMode) { - /* Fill the Hc/Bt VTable arrays with the right functions for the (dictMode, mls) combination. */ - ZSTD_LazyVTable const* const hcVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_HC_VTABLE_ARRAY); - ZSTD_LazyVTable const* const btVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_BT_VTABLE_ARRAY); - /* Fill the Row VTable array with the right functions for the (dictMode, mls, rowLog) combination. */ - ZSTD_LazyVTable const* const rowVTables[4][3][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_ROW_VTABLE_ARRAY); - - U32 const mls = MAX(4, MIN(6, ms->cParams.minMatch)); - U32 const rowLog = MAX(4, MIN(6, ms->cParams.searchLog)); - switch (searchMethod) { - case search_hashChain: - return hcVTables[dictMode][mls - 4]; - case search_binaryTree: - return btVTables[dictMode][mls - 4]; - case search_rowHash: - return rowVTables[dictMode][mls - 4][rowLog - 4]; - default: - return NULL; + if (dictMode == ZSTD_noDict) { + ZSTD_SWITCH_SEARCH_METHOD(noDict) + } else if (dictMode == ZSTD_extDict) { + ZSTD_SWITCH_SEARCH_METHOD(extDict) + } else if (dictMode == ZSTD_dictMatchState) { + ZSTD_SWITCH_SEARCH_METHOD(dictMatchState) + } else if (dictMode == ZSTD_dedicatedDictSearch) { + ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch) } + ZSTD_UNREACHABLE; + return 0; } -FORCE_INLINE_TEMPLATE size_t -ZSTD_compressBlock_lazy_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, +/* ******************************* +* Common parser - lazy strategy +*********************************/ + +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_lazy_generic( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth, @@ -1501,15 +1572,16 @@ ZSTD_compressBlock_lazy_generic( const BYTE* const base = ms->window.base; const U32 prefixLowestIndex = ms->window.dictLimit; const BYTE* const prefixLowest = base + prefixLowestIndex; + const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, dictMode)->searchMax; U32 offset_1 = rep[0], offset_2 = rep[1]; U32 offsetSaved1 = 0, offsetSaved2 = 0; const int isDMS = dictMode == ZSTD_dictMatchState; const int isDDS = dictMode == ZSTD_dedicatedDictSearch; const int isDxS = isDMS || isDDS; - const ZSTD_matchState_t* const dms = ms->dictMatchState; + const ZSTD_MatchState_t* const dms = ms->dictMatchState; const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0; const BYTE* const dictBase = isDxS ? dms->window.base : NULL; const BYTE* const dictLowest = isDxS ? 
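The dispatch pattern ZSTD_searchMax() implements reduces to the miniature below (GEN_SEARCH_FN and searchDispatch are invented names): one outlined function per template-parameter value, selected by a switch rather than a function pointer, so templated callers constant-fold the switch away and nothing pays the indirect-call penalty imposed by Spectre/Meltdown mitigations.

#include <stddef.h>

#define GEN_SEARCH_FN(mls)                                      \
    static size_t search_##mls(const char* ip, size_t len)      \
    {                                                           \
        (void)ip;                                               \
        return len >= (mls) ? (size_t)(mls) : 0; /* stand-in */ \
    }

GEN_SEARCH_FN(4)
GEN_SEARCH_FN(5)
GEN_SEARCH_FN(6)

/* When mls is a compile-time constant at the call site, this switch folds to
 * a direct call to the right search_N(). */
static size_t searchDispatch(const char* ip, size_t len, unsigned const mls)
{
    switch (mls) {
        case 4: return search_4(ip, len);
        case 5: return search_5(ip, len);
        case 6: return search_6(ip, len);
    }
    return 0; /* unreachable for mls in [4, 6] */
}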
dictBase + dictLowestIndex : NULL; @@ -1519,8 +1591,6 @@ ZSTD_compressBlock_lazy_generic( 0; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest)); - assert(searchMax != NULL); - DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod); ip += (dictAndPrefixLength == 0); if (dictMode == ZSTD_noDict) { @@ -1537,11 +1607,11 @@ ZSTD_compressBlock_lazy_generic( assert(offset_2 <= dictAndPrefixLength); } + /* Reset the lazy skipping state */ + ms->lazySkipping = 0; + if (searchMethod == search_rowHash) { - const U32 rowLog = MAX(4, MIN(6, ms->cParams.searchLog)); - ZSTD_row_fillHashCache(ms, base, rowLog, - MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */), - ms->nextToUpdate, ilimit); + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); } /* Match Loop */ @@ -1564,7 +1634,7 @@ ZSTD_compressBlock_lazy_generic( && repIndex < prefixLowestIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; - if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; @@ -1579,13 +1649,22 @@ ZSTD_compressBlock_lazy_generic( /* first search (depth 0) */ { size_t offbaseFound = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offbaseFound); + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode); if (ml2 > matchLength) matchLength = ml2, start = ip, offBase = offbaseFound; } if (matchLength < 4) { - ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */; + ip += step; + /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time. + * In this mode we stop inserting every position into our tables, and only insert + * positions that we search, which is one in step positions. + * The exact cutoff is flexible, I've just chosen a number that is reasonably high, + * so we minimize the compression ratio loss in "normal" scenarios. This mode gets + * triggered once we've gone 2KB without finding any matches. + */ + ms->lazySkipping = step > kLazySkippingStep; continue; } @@ -1607,7 +1686,7 @@ ZSTD_compressBlock_lazy_generic( const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; - if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
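The 2KB figure in the lazy-skipping comment above follows directly from the step arithmetic. A sketch of the cutoff, assuming kSearchStrength == 8 and kLazySkippingStep == 8 (their values elsewhere in this codebase):

#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t const kSearchStrength = 8;    /* assumed, defined elsewhere */
    size_t const kLazySkippingStep = 8;  /* assumed, defined elsewhere */
    size_t dist;
    for (dist = 0; dist < 8192; ++dist) {
        size_t const step = (dist >> kSearchStrength) + 1;
        int const lazySkipping = step > kLazySkippingStep;
        /* step exceeds 8 exactly when 8 << 8 == 2048 bytes have passed
         * without a match, i.e. lazy skipping engages after ~2KB */
        assert(lazySkipping == (dist >= 2048));
    }
    return 0;
}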
dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; @@ -1618,7 +1697,7 @@ ZSTD_compressBlock_lazy_generic( } } { size_t ofbCandidate=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &ofbCandidate); + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { @@ -1643,7 +1722,7 @@ ZSTD_compressBlock_lazy_generic( const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; - if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; @@ -1654,7 +1733,7 @@ ZSTD_compressBlock_lazy_generic( } } { size_t ofbCandidate=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &ofbCandidate); + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { @@ -1689,6 +1768,13 @@ ZSTD_compressBlock_lazy_generic( ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); anchor = ip = start + matchLength; } + if (ms->lazySkipping) { + /* We've found a match, disable lazy skipping mode, and refill the hash cache. */ + if (searchMethod == search_rowHash) { + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); + } + ms->lazySkipping = 0; + } /* check immediate repcode */ if (isDxS) { @@ -1698,7 +1784,7 @@ ZSTD_compressBlock_lazy_generic( const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase - dictIndexDelta + repIndex : base + repIndex; - if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */) + if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? 
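The gain comparison repeated above is worth unpacking: a match scores 4 points per byte minus log2 of its offset code, and the incumbent match receives a +4 (depth 1) or +7 (depth 2) bonus, biasing the parser toward keeping what it already has. A worked sketch (gain() is an invented stand-in for the inline expressions):

#include <assert.h>

/* log2 stand-in for ZSTD_highbit32() */
static int highbit(unsigned v) { int n = 0; while (v >>= 1) ++n; return n; }

static int gain(int const ml, unsigned const offBase, int const bonus)
{
    return ml * 4 - highbit(offBase) + bonus;
}

int main(void)
{
    /* A 6-byte incumbent at offBase 1024 beats a 5-byte candidate at
     * offBase 64 found one position later: 24 - 10 + 4 = 18 vs 20 - 6 = 14. */
    assert(gain(6, 1024, 4) > gain(5, 64, 0));
    return 0;
}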
dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; @@ -1735,154 +1821,165 @@ ZSTD_compressBlock_lazy_generic( /* Return the last literals size */ return (size_t)(iend - anchor); } +#endif /* build exclusions */ -size_t ZSTD_compressBlock_btlazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_greedy( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict); } -size_t ZSTD_compressBlock_lazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState); } -size_t ZSTD_compressBlock_lazy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch); } -size_t ZSTD_compressBlock_greedy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict); } -size_t ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState); } -size_t ZSTD_compressBlock_lazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch); } +#endif -size_t ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, 
size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict); } -size_t ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState); } - -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch); } -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict); } -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState); } -/* Row-based matchfinder */ -size_t ZSTD_compressBlock_lazy2_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch); } +#endif -size_t ZSTD_compressBlock_lazy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict); } -size_t ZSTD_compressBlock_greedy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, 
U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState); } -size_t ZSTD_compressBlock_lazy2_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch); } -size_t ZSTD_compressBlock_lazy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict); } -size_t ZSTD_compressBlock_greedy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState); } - size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch); } +#endif -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btlazy2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict); } -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btlazy2_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState); } +#endif +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t 
ZSTD_compressBlock_lazy_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth) @@ -1899,19 +1996,20 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const dictStart = dictBase + ms->window.lowLimit; const U32 windowLog = ms->cParams.windowLog; - const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5; + const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, ZSTD_extDict)->searchMax; U32 offset_1 = rep[0], offset_2 = rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod); + /* Reset the lazy skipping state */ + ms->lazySkipping = 0; + /* init */ ip += (ip == prefixStart); if (searchMethod == search_rowHash) { - ZSTD_row_fillHashCache(ms, base, rowLog, - MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */), - ms->nextToUpdate, ilimit); + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); } /* Match Loop */ @@ -1932,7 +2030,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const U32 repIndex = (U32)(curr+1 - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; - if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */ + if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */ if (MEM_read32(ip+1) == MEM_read32(repMatch)) { /* repcode detected we should take it */ @@ -1943,13 +2041,22 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( /* first search (depth 0) */ { size_t ofbCandidate = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &ofbCandidate); + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); if (ml2 > matchLength) matchLength = ml2, start = ip, offBase = ofbCandidate; } if (matchLength < 4) { - ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + size_t const step = ((size_t)(ip-anchor) >> kSearchStrength); + ip += step + 1; /* jump faster over incompressible sections */ + /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time. + * In this mode we stop inserting every position into our tables, and only insert + * positions that we search, which is one in step positions. + * The exact cutoff is flexible, I've just chosen a number that is reasonably high, + * so we minimize the compression ratio loss in "normal" scenarios. This mode gets + * triggered once we've gone 2KB without finding any matches. + */ + ms->lazySkipping = step > kLazySkippingStep; continue; } @@ -1964,7 +2071,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? 
dictBase : base; const BYTE* const repMatch = repBase + repIndex; - if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ + if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ @@ -1978,7 +2085,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( /* search match, depth 1 */ { size_t ofbCandidate = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &ofbCandidate); + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { @@ -1996,7 +2103,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; - if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ + if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ @@ -2010,7 +2117,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( /* search match, depth 2 */ { size_t ofbCandidate = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &ofbCandidate); + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { @@ -2035,6 +2142,13 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); anchor = ip = start + matchLength; } + if (ms->lazySkipping) { + /* We've found a match, disable lazy skipping mode, and refill the hash cache. */ + if (searchMethod == search_rowHash) { + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); + } + ms->lazySkipping = 0; + } /* check immediate repcode */ while (ip <= ilimit) { @@ -2043,7 +2157,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const U32 repIndex = repCurrent - offset_2; const BYTE* const repBase = repIndex < dictLimit ? 
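ZSTD_index_overlap_check(), substituted throughout these hunks, factors out the intentional-underflow idiom it replaces. A sketch with an assumed definition matching the old inline expression: a single unsigned compare rejects exactly the repIndex values whose 4-byte read would straddle the dict/prefix boundary, while indices safely on either side pass.

#include <assert.h>
typedef unsigned int U32;

/* Assumed to match the expression this macro replaces. */
#define INDEX_OVERLAP_CHECK(dictLimit, repIndex) \
    ((U32)(((dictLimit) - 1) - (repIndex)) >= 3) /* intentional underflow */

int main(void)
{
    U32 const dictLimit = 100;
    assert( INDEX_OVERLAP_CHECK(dictLimit,  96)); /* fully in extDict: pass */
    assert(!INDEX_OVERLAP_CHECK(dictLimit,  97)); /* read straddles: reject */
    assert(!INDEX_OVERLAP_CHECK(dictLimit,  99)); /* read straddles: reject */
    assert( INDEX_OVERLAP_CHECK(dictLimit, 100)); /* in prefix, underflows: pass */
    return 0;
}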
dictBase : base; const BYTE* const repMatch = repBase + repIndex; - if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ + if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected we should take it */ @@ -2065,58 +2179,65 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( /* Return the last literals size */ return (size_t)(iend - anchor); } +#endif /* build exclusions */ - +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_greedy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0); } -size_t ZSTD_compressBlock_lazy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) - { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0); } +#endif -size_t ZSTD_compressBlock_lazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1); } -size_t ZSTD_compressBlock_btlazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1); } +#endif -size_t ZSTD_compressBlock_greedy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy2_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) + { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2); } -size_t ZSTD_compressBlock_lazy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) - { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2); } +#endif -size_t ZSTD_compressBlock_lazy2_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 
rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btlazy2_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2); + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2); } +#endif diff --git a/lib/compress/zstd_lazy.h b/lib/compress/zstd_lazy.h index 150f7b390b8..bd8dc49e64c 100644 --- a/lib/compress/zstd_lazy.h +++ b/lib/compress/zstd_lazy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,10 +11,6 @@ #ifndef ZSTD_LAZY_H #define ZSTD_LAZY_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "zstd_compress_internal.h" /** @@ -25,101 +21,173 @@ extern "C" { */ #define ZSTD_LAZY_DDSS_BUCKET_LOG 2 -U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); -void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip); +#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ -void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip); +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) +U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip); +void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip); + +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip); void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). 
preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */ +#endif -size_t ZSTD_compressBlock_btlazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_greedy( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy +#define ZSTD_COMPRESSBLOCK_GREEDY_ROW ZSTD_compressBlock_greedy_row +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE ZSTD_compressBlock_greedy_dictMatchState +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW ZSTD_compressBlock_greedy_dictMatchState_row +#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH ZSTD_compressBlock_greedy_dedicatedDictSearch +#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_greedy_dedicatedDictSearch_row +#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT ZSTD_compressBlock_greedy_extDict +#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW ZSTD_compressBlock_greedy_extDict_row +#else +#define ZSTD_COMPRESSBLOCK_GREEDY NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_ROW NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT NULL +#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW NULL +#endif + +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dictMatchState( - 
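The ZSTD_COMPRESSBLOCK_* macros introduced here let excluded strategies degrade to NULL instead of breaking the build. A sketch of the consuming pattern (names invented; the real selection table lives in zstd_compress.c):

#include <stddef.h>

typedef size_t (*BlockCompressor_f)(const void* src, size_t srcSize);

static size_t greedy_impl(const void* src, size_t srcSize)
{
    (void)src;
    return srcSize; /* stand-in body */
}

#ifndef EXCLUDE_GREEDY          /* stand-in for the ZSTD_EXCLUDE_* build flags */
#  define BLOCK_GREEDY greedy_impl
#else
#  define BLOCK_GREEDY NULL     /* strategy compiled out */
#endif

/* The table is built unconditionally; callers probe entries for NULL before
 * dispatching, so an excluded strategy is "not available" rather than a
 * link error. */
static BlockCompressor_f const kCompressors[] = { BLOCK_GREEDY };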
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + +#define ZSTD_COMPRESSBLOCK_LAZY ZSTD_compressBlock_lazy +#define ZSTD_COMPRESSBLOCK_LAZY_ROW ZSTD_compressBlock_lazy_row +#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE ZSTD_compressBlock_lazy_dictMatchState +#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy_dictMatchState_row +#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy_dedicatedDictSearch +#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy_dedicatedDictSearch_row +#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT ZSTD_compressBlock_lazy_extDict +#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW ZSTD_compressBlock_lazy_extDict_row +#else +#define ZSTD_COMPRESSBLOCK_LAZY NULL +#define ZSTD_COMPRESSBLOCK_LAZY_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH NULL +#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT NULL +#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW NULL +#endif + +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( - ZSTD_matchState_t* 
ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - -size_t ZSTD_compressBlock_greedy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dictMatchState_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_extDict_row( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + +#define ZSTD_COMPRESSBLOCK_LAZY2 ZSTD_compressBlock_lazy2 +#define ZSTD_COMPRESSBLOCK_LAZY2_ROW ZSTD_compressBlock_lazy2_row +#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE ZSTD_compressBlock_lazy2_dictMatchState +#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy2_dictMatchState_row +#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy2_dedicatedDictSearch +#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy2_dedicatedDictSearch_row +#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT ZSTD_compressBlock_lazy2_extDict +#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW ZSTD_compressBlock_lazy2_extDict_row +#else +#define ZSTD_COMPRESSBLOCK_LAZY2 NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT NULL +#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW NULL +#endif + +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btlazy2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btlazy2_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btlazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t 
srcSize); - -#if defined (__cplusplus) -} +#define ZSTD_COMPRESSBLOCK_BTLAZY2 ZSTD_compressBlock_btlazy2 +#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE ZSTD_compressBlock_btlazy2_dictMatchState +#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT ZSTD_compressBlock_btlazy2_extDict +#else +#define ZSTD_COMPRESSBLOCK_BTLAZY2 NULL +#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT NULL #endif #endif /* ZSTD_LAZY_H */ diff --git a/lib/compress/zstd_ldm.c b/lib/compress/zstd_ldm.c index c14c62454f4..7bf36cd8655 100644 --- a/lib/compress/zstd_ldm.c +++ b/lib/compress/zstd_ldm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,7 +16,7 @@ #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ #include "zstd_ldm_geartab.h" -#define LDM_BUCKET_SIZE_LOG 3 +#define LDM_BUCKET_SIZE_LOG 4 #define LDM_MIN_MATCH_LENGTH 64 #define LDM_HASH_RLOG 7 @@ -133,21 +133,39 @@ static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, } void ZSTD_ldm_adjustParameters(ldmParams_t* params, - ZSTD_compressionParameters const* cParams) + const ZSTD_compressionParameters* cParams) { params->windowLog = cParams->windowLog; ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); - if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; - if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; + if (params->hashRateLog == 0) { + if (params->hashLog > 0) { + /* if params->hashLog is set, derive hashRateLog from it */ + assert(params->hashLog <= ZSTD_HASHLOG_MAX); + if (params->windowLog > params->hashLog) { + params->hashRateLog = params->windowLog - params->hashLog; + } + } else { + assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); + /* mapping from [fast, rate7] to [btultra2, rate4] */ + params->hashRateLog = 7 - (cParams->strategy/3); + } + } if (params->hashLog == 0) { - params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG); - assert(params->hashLog <= ZSTD_HASHLOG_MAX); + if (params->windowLog <= params->hashRateLog) { + params->hashLog = ZSTD_HASHLOG_MIN; + } else { + params->hashLog = BOUNDED(ZSTD_HASHLOG_MIN, params->windowLog - params->hashRateLog, ZSTD_HASHLOG_MAX); + } } - if (params->hashRateLog == 0) { - params->hashRateLog = params->windowLog < params->hashLog - ? 0 - : params->windowLog - params->hashLog; + if (params->minMatchLength == 0) { + params->minMatchLength = LDM_MIN_MATCH_LENGTH; + if (cParams->strategy >= ZSTD_btultra) + params->minMatchLength /= 2; + } + if (params->bucketSizeLog==0) { + assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); + params->bucketSizeLog = BOUNDED(LDM_BUCKET_SIZE_LOG, (U32)cParams->strategy, ZSTD_LDM_BUCKETSIZELOG_MAX); } params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); } @@ -170,22 +188,22 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) /** ZSTD_ldm_getBucket() : * Returns a pointer to the start of the bucket associated with hash. 
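The rewritten ZSTD_ldm_adjustParameters() above packs several defaulting rules into one hunk. As an illustration only, they distil to the standalone sketch below: all *_sketch names are invented for this note, BOUNDED_() is assumed to clamp its middle argument like zstd's internal BOUNDED macro, and the HASHLOG/BUCKETSIZELOG bounds are stated assumptions rather than values quoted from this diff.

#include <assert.h>

#define MIN_(a,b)          ((a) < (b) ? (a) : (b))
#define MAX_(a,b)          ((a) > (b) ? (a) : (b))
#define BOUNDED_(lo,x,hi)  MAX_(lo, MIN_(x, hi))   /* assumed equivalent to zstd's BOUNDED */

typedef struct {
    unsigned windowLog, hashLog, hashRateLog, minMatchLength, bucketSizeLog;
} ldm_params_sketch;

static void ldm_adjust_sketch(ldm_params_sketch* p, int strategy)  /* 1 (fast) .. 9 (btultra2) */
{
    assert(1 <= strategy && strategy <= 9);
    if (p->hashRateLog == 0) {
        if (p->hashLog > 0) {                 /* hashLog set by user: derive the rate from it */
            if (p->windowLog > p->hashLog)
                p->hashRateLog = p->windowLog - p->hashLog;
        } else {                              /* strategies 1-2 -> rate 7, ..., strategy 9 -> rate 4 */
            p->hashRateLog = 7u - (unsigned)(strategy / 3);
        }
    }
    if (p->hashLog == 0) {                    /* roughly one table position per 2^hashRateLog window bytes */
        p->hashLog = (p->windowLog <= p->hashRateLog)
                   ? 6u                                                  /* ZSTD_HASHLOG_MIN assumed 6 */
                   : BOUNDED_(6u, p->windowLog - p->hashRateLog, 30u);   /* ZSTD_HASHLOG_MAX assumed 30 */
    }
    if (p->minMatchLength == 0) {
        p->minMatchLength = 64;               /* LDM_MIN_MATCH_LENGTH */
        if (strategy >= 8)                    /* ZSTD_btultra and up accept shorter LDM matches */
            p->minMatchLength /= 2;
    }
    if (p->bucketSizeLog == 0)                /* stronger strategies search wider buckets */
        p->bucketSizeLog = BOUNDED_(4u, (unsigned)strategy, 8u);  /* ZSTD_LDM_BUCKETSIZELOG_MAX assumed 8 */
    p->bucketSizeLog = MIN_(p->bucketSizeLog, p->hashLog);
}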
*/ static ldmEntry_t* ZSTD_ldm_getBucket( - ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) + const ldmState_t* ldmState, size_t hash, U32 const bucketSizeLog) { - return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); + return ldmState->hashTable + (hash << bucketSizeLog); } /** ZSTD_ldm_insertEntry() : * Insert the entry with corresponding hash into the hash table */ static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, size_t const hash, const ldmEntry_t entry, - ldmParams_t const ldmParams) + U32 const bucketSizeLog) { BYTE* const pOffset = ldmState->bucketOffsets + hash; unsigned const offset = *pOffset; - *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry; - *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1)); + *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry; + *pOffset = (BYTE)((offset + 1) & ((1u << bucketSizeLog) - 1)); } @@ -234,7 +252,7 @@ static size_t ZSTD_ldm_countBackwardsMatch_2segments( * * The tables for the other strategies are filled within their * block compressors. */ -static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, +static size_t ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, void const* end) { const BYTE* const iend = (const BYTE*)end; @@ -246,7 +264,11 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, break; case ZSTD_dfast: +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. */ +#endif break; case ZSTD_greedy: @@ -269,7 +291,8 @@ void ZSTD_ldm_fillHashTable( const BYTE* iend, ldmParams_t const* params) { U32 const minMatchLength = params->minMatchLength; - U32 const hBits = params->hashLog - params->bucketSizeLog; + U32 const bucketSizeLog = params->bucketSizeLog; + U32 const hBits = params->hashLog - bucketSizeLog; BYTE const* const base = ldmState->window.base; BYTE const* const istart = ip; ldmRollingHashState_t hashState; @@ -284,7 +307,7 @@ void ZSTD_ldm_fillHashTable( unsigned n; numSplits = 0; - hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits); + hashed = ZSTD_ldm_gear_feed(&hashState, ip, (size_t)(iend - ip), splits, &numSplits); for (n = 0; n < numSplits; n++) { if (ip + splits[n] >= istart + minMatchLength) { @@ -295,7 +318,7 @@ void ZSTD_ldm_fillHashTable( entry.offset = (U32)(split - base); entry.checksum = (U32)(xxhash >> 32); - ZSTD_ldm_insertEntry(ldmState, hash, entry, *params); + ZSTD_ldm_insertEntry(ldmState, hash, entry, params->bucketSizeLog); } } @@ -309,7 +332,7 @@ void ZSTD_ldm_fillHashTable( * Sets cctx->nextToUpdate to a position corresponding closer to anchor * if it is far way * (after a long match, only update tables a limited amount). 
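Passing bucketSizeLog instead of the whole ldmParams_t is plumbing, but the bucket arithmetic it feeds is worth seeing in isolation, especially since LDM_BUCKET_SIZE_LOG just grew from 3 to 4 and doubled the default number of candidates per bucket. A minimal sketch with a simplified entry type; all names here are illustrative:

#include <stddef.h>

typedef struct { unsigned offset; unsigned checksum; } ldm_entry_sketch;

/* Buckets are contiguous runs of 2^bucketSizeLog entries in one flat table. */
static ldm_entry_sketch* bucket_sketch(ldm_entry_sketch* hashTable, size_t hash, unsigned bucketSizeLog)
{
    return hashTable + (hash << bucketSizeLog);
}

/* Each bucket fills in rotation: bucketOffsets[hash] names the next slot and
 * wraps under a mask, so the oldest entry is overwritten first. */
static void insert_entry_sketch(ldm_entry_sketch* hashTable, unsigned char* bucketOffsets,
                                size_t hash, ldm_entry_sketch e, unsigned bucketSizeLog)
{
    unsigned const off = bucketOffsets[hash];
    bucket_sketch(hashTable, hash, bucketSizeLog)[off] = e;
    bucketOffsets[hash] = (unsigned char)((off + 1) & ((1u << bucketSizeLog) - 1));
}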
*/ -static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) +static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, const BYTE* anchor) { U32 const curr = (U32)(anchor - ms->window.base); if (curr > ms->nextToUpdate + 1024) { @@ -318,8 +341,10 @@ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) } } -static size_t ZSTD_ldm_generateSequences_internal( - ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_ldm_generateSequences_internal( + ldmState_t* ldmState, RawSeqStore_t* rawSeqStore, ldmParams_t const* params, void const* src, size_t srcSize) { /* LDM parameters */ @@ -373,7 +398,7 @@ static size_t ZSTD_ldm_generateSequences_internal( candidates[n].split = split; candidates[n].hash = hash; candidates[n].checksum = (U32)(xxhash >> 32); - candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params); + candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, params->bucketSizeLog); PREFETCH_L1(candidates[n].bucket); } @@ -396,7 +421,7 @@ static size_t ZSTD_ldm_generateSequences_internal( * the previous one, we merely register it in the hash table and * move on */ if (split < anchor) { - ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); continue; } @@ -443,7 +468,7 @@ static size_t ZSTD_ldm_generateSequences_internal( /* No match found -- insert an entry into the hash table * and process the next candidate match */ if (bestEntry == NULL) { - ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); continue; } @@ -464,7 +489,7 @@ static size_t ZSTD_ldm_generateSequences_internal( /* Insert the current entry into the hash table --- it must be * done after the previous block to avoid clobbering bestEntry */ - ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); anchor = split + forwardMatchLength; @@ -503,7 +528,7 @@ static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, } size_t ZSTD_ldm_generateSequences( - ldmState_t* ldmState, rawSeqStore_t* sequences, + ldmState_t* ldmState, RawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize) { U32 const maxDist = 1U << params->windowLog; @@ -580,7 +605,7 @@ size_t ZSTD_ldm_generateSequences( } void -ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) +ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) { while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; @@ -616,7 +641,7 @@ ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const min * Returns the current sequence to handle, or if the rest of the block should * be literals, it returns a sequence with offset == 0. 
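The rawSeqStore bookkeeping that the renamed functions above manipulate is easy to lose among the type renames. A simplified standalone sketch of the skipping logic (the real ZSTD_ldm_skipSequences() additionally demotes a truncated match shorter than minMatch into literals; these names are invented):

#include <stddef.h>

typedef struct { unsigned litLength, matchLength, offset; } raw_seq_sketch;

/* Consume srcSize bytes from the store: literals first, then match bytes.
 * A partially consumed sequence stays at index *pos with reduced lengths. */
static void skip_bytes_sketch(raw_seq_sketch* seq, size_t* pos, size_t size, size_t srcSize)
{
    while (srcSize > 0 && *pos < size) {
        raw_seq_sketch* s = seq + *pos;
        if (srcSize < s->litLength) { s->litLength -= (unsigned)srcSize; return; }
        srcSize -= s->litLength;
        s->litLength = 0;
        if (srcSize < s->matchLength) { s->matchLength -= (unsigned)srcSize; return; }
        srcSize -= s->matchLength;
        s->matchLength = 0;
        (*pos)++;    /* sequence fully consumed */
    }
}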
*/ -static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, +static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore, U32 const remaining, U32 const minMatch) { rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; @@ -640,7 +665,7 @@ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, return sequence; } -void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { +void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) { U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); while (currPos && rawSeqStore->pos < rawSeqStore->size) { rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; @@ -657,14 +682,14 @@ void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { } } -size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - ZSTD_paramSwitch_e useRowMatchFinder, +size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_ParamSwitch_e useRowMatchFinder, void const* src, size_t srcSize) { const ZSTD_compressionParameters* const cParams = &ms->cParams; unsigned const minMatch = cParams->minMatch; - ZSTD_blockCompressor const blockCompressor = + ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms)); /* Input bounds */ BYTE const* const istart = (BYTE const*)src; @@ -689,7 +714,6 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, /* maybeSplitSequence updates rawSeqStore->pos */ rawSeq const sequence = maybeSplitSequence(rawSeqStore, (U32)(iend - ip), minMatch); - int i; /* End signal */ if (sequence.offset == 0) break; @@ -702,6 +726,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, /* Run the block compressor */ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength); { + int i; size_t const newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); ip += sequence.litLength; diff --git a/lib/compress/zstd_ldm.h b/lib/compress/zstd_ldm.h index 4e68dbf52e3..42736231aa8 100644 --- a/lib/compress/zstd_ldm.h +++ b/lib/compress/zstd_ldm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,10 +11,6 @@ #ifndef ZSTD_LDM_H #define ZSTD_LDM_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "zstd_compress_internal.h" /* ldmParams_t, U32 */ #include "../zstd.h" /* ZSTD_CCtx, size_t */ @@ -43,7 +39,7 @@ void ZSTD_ldm_fillHashTable( * sequences. */ size_t ZSTD_ldm_generateSequences( - ldmState_t* ldms, rawSeqStore_t* sequences, + ldmState_t* ldms, RawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize); /** @@ -64,9 +60,9 @@ size_t ZSTD_ldm_generateSequences( * two. We handle that case correctly, and update `rawSeqStore` appropriately. * NOTE: This function does not return any errors. 
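maybeSplitSequence(), shown above with its new RawSeqStore_t signature, is the piece that keeps LDM sequences inside block boundaries. Its decision logic, distilled with the store bookkeeping omitted; offset == 0 is the documented sentinel for "emit the rest as literals":

typedef struct { unsigned litLength, matchLength, offset; } raw_seq_sketch;

static raw_seq_sketch split_sketch(raw_seq_sketch s, unsigned remaining, unsigned minMatch)
{
    if (s.litLength + s.matchLength <= remaining)
        return s;                                   /* fits entirely: use as-is */
    if (remaining <= s.litLength) {
        s.offset = 0;                               /* match would start beyond the block */
    } else {
        s.matchLength = remaining - s.litLength;    /* truncate the match at the block end */
        if (s.matchLength < minMatch)
            s.offset = 0;                           /* leftover too short to emit: literals instead */
    }
    return s;
}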
*/ -size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - ZSTD_paramSwitch_e useRowMatchFinder, +size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_ParamSwitch_e useRowMatchFinder, void const* src, size_t srcSize); /** @@ -76,7 +72,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, * Avoids emitting matches less than `minMatch` bytes. * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). */ -void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, +void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch); /* ZSTD_ldm_skipRawSeqStoreBytes(): @@ -84,7 +80,7 @@ void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, * Not to be used in conjunction with ZSTD_ldm_skipSequences(). * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). */ -void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes); +void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes); /** ZSTD_ldm_getTableSize() : * Estimate the space needed for long distance matching tables or 0 if LDM is @@ -110,8 +106,4 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize); void ZSTD_ldm_adjustParameters(ldmParams_t* params, ZSTD_compressionParameters const* cParams); -#if defined (__cplusplus) -} -#endif - #endif /* ZSTD_FAST_H */ diff --git a/lib/compress/zstd_ldm_geartab.h b/lib/compress/zstd_ldm_geartab.h index 647f865be29..ef34bc5c923 100644 --- a/lib/compress/zstd_ldm_geartab.h +++ b/lib/compress/zstd_ldm_geartab.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/compress/zstd_opt.c b/lib/compress/zstd_opt.c index 800f87e9efc..562a6b17bc9 100644 --- a/lib/compress/zstd_opt.c +++ b/lib/compress/zstd_opt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -12,11 +12,14 @@ #include "hist.h" #include "zstd_opt.h" +#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ #define ZSTD_MAX_PRICE (1<<30) -#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ +#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ /*-************************************* @@ -26,27 +29,35 @@ #if 0 /* approximation at bit level (for tests) */ # define BITCOST_ACCURACY 0 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat)) +# define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat)) #elif 0 /* fractional bit accuracy (for tests) */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) +# define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat)) #else /* opt==approx, ultra==accurate */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) +# define WEIGHT(stat,opt) ((opt) ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) #endif +/* ZSTD_bitWeight() : + * provide estimated "cost" of a stat in full bits only */ MEM_STATIC U32 ZSTD_bitWeight(U32 stat) { return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER); } +/* ZSTD_fracWeight() : + * provide fractional-bit "cost" of a stat, + * using linear interpolation approximation */ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) { U32 const stat = rawStat + 1; U32 const hb = ZSTD_highbit32(stat); U32 const BWeight = hb * BITCOST_MULTIPLIER; + /* FWeight was meant as "Fractional Weight" + * but it's effectively a value between 1 and 2 + * using fixed point arithmetic */ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb; U32 const weight = BWeight + FWeight; assert(hb + BITCOST_ACCURACY < 31); @@ -57,7 +68,7 @@ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) /* debugging function, * @return price in bytes as fractional value * for debug messages only */ -MEM_STATIC double ZSTD_fCost(U32 price) +MEM_STATIC double ZSTD_fCost(int price) { return (double)price / (BITCOST_MULTIPLIER*8); } @@ -88,20 +99,26 @@ static U32 sum_u32(const unsigned table[], size_t nbElts) return total; } -static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift) +typedef enum { base_0possible=0, base_1guaranteed=1 } base_directive_e; + +static U32 +ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1) { U32 s, sum=0; - DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift); + DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", + (unsigned)lastEltIndex+1, (unsigned)shift ); assert(shift < 30); for (s=0; s<lastEltIndex+1; s++) { - table[s] = 1 + (table[s] >> shift); - sum += table[s]; + unsigned const base = base1 ?
1 : (table[s]>0); + unsigned const newStat = base + (table[s] >> shift); + sum += newStat; + table[s] = newStat; } return sum; } /* ZSTD_scaleStats() : - * reduce all elements in table is sum too large + * reduce all elt frequencies in table if sum too large * return the resulting sum of elements */ static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) { @@ -110,7 +127,7 @@ static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget); assert(logTarget < 30); if (factor <= 1) return prevsum; - return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor)); + return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed); } /* ZSTD_rescaleFreqs() : @@ -129,18 +146,22 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize); optPtr->priceType = zop_dynamic; - if (optPtr->litLengthSum == 0) { /* first block : init */ - if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */ - DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef"); + if (optPtr->litLengthSum == 0) { /* no literals stats collected -> first block assumed -> init */ + + /* heuristic: use pre-defined stats for too small inputs */ + if (srcSize <= ZSTD_PREDEF_THRESHOLD) { + DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD); optPtr->priceType = zop_predef; } assert(optPtr->symbolCosts != NULL); if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { - /* huffman table presumed generated by dictionary */ + + /* huffman stats covering the full value set : table presumed generated by dictionary */ optPtr->priceType = zop_dynamic; if (compressedLiterals) { + /* generate literals statistics from huffman table */ unsigned lit; assert(optPtr->litFreq != NULL); optPtr->litSum = 0; @@ -188,13 +209,14 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, optPtr->offCodeSum += optPtr->offCodeFreq[of]; } } - } else { /* not a dictionary */ + } else { /* first block, no dictionary */ assert(optPtr->litFreq != NULL); if (compressedLiterals) { + /* base initial cost of literals on direct frequency within src */ unsigned lit = MaxLit; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ - optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8); + optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8, base_0possible); } { unsigned const baseLLfreqs[MaxLL+1] = { @@ -224,10 +246,9 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1); } - } - } else { /* new block : re-use previous statistics, scaled down */ + } else { /* new block : scale down accumulated statistics */ if (compressedLiterals) optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12); @@ -246,6 +267,7 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, const optState_t* const optPtr, int optLevel) { + DEBUGLOG(8, "ZSTD_rawLiteralsCost (%u literals)", litLength); if (litLength == 0) return 0; if (!ZSTD_compressedLiterals(optPtr)) @@ -275,10 +297,11 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP assert(litLength <= ZSTD_BLOCKSIZE_MAX); if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel); - /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX - * because it isn't representable in the zstd format. 
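The comments added to ZSTD_fracWeight() above are terse; here is the same fixed-point arithmetic as a runnable worked example. With BITCOST_ACCURACY == 8, a weight is a cost in 1/256ths of a bit: hb whole bits plus a linear interpolation of the fractional part (FWeight lands in [256,512), the "value between 1 and 2"). Helper names are invented:

#include <stdio.h>

static unsigned highbit_sketch(unsigned v) { unsigned h = 0; while (v >>= 1) h++; return h; }

static unsigned frac_weight_sketch(unsigned rawStat)
{
    unsigned const stat    = rawStat + 1;
    unsigned const hb      = highbit_sketch(stat);
    unsigned const BWeight = hb << 8;              /* whole-bit part, scaled by 256 */
    unsigned const FWeight = (stat << 8) >> hb;    /* in [256,512): fixed-point 1.xx */
    return BWeight + FWeight;                      /* ~ (log2(stat) + 1) * 256 */
}

int main(void)
{
    /* rawStat=4 -> stat=5: hb=2, BWeight=512, FWeight=320, total 832, i.e. 3.25 "bits";
     * exact would be log2(5)+1 = 3.32, so the linear chord slightly underestimates. */
    printf("%u\n", frac_weight_sketch(4));
    return 0;
}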
So instead just - * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block - * would be all literals. + + /* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX + * because it isn't representable in the zstd format. + * So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. + * In such a case, the block would be all literals. */ if (litLength == ZSTD_BLOCKSIZE_MAX) return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel); @@ -292,7 +315,7 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP } /* ZSTD_getMatchPrice() : - * Provides the cost of the match part (offset + matchLength) of a sequence + * Provides the cost of the match part (offset + matchLength) of a sequence. * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq() * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) @@ -308,8 +331,9 @@ ZSTD_getMatchPrice(U32 const offBase, U32 const mlBase = matchLength - MINMATCH; assert(matchLength >= MINMATCH); - if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */ - return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER); + if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */ + return WEIGHT(mlBase, optLevel) + + ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */ /* dynamic statistics */ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel)); @@ -347,7 +371,7 @@ static void ZSTD_updateStats(optState_t* const optPtr, optPtr->litLengthSum++; } - /* offset code : expected to follow storeSeq() numeric representation */ + /* offset code : follows storeSeq() numeric representation */ { U32 const offCode = ZSTD_highbit32(offBase); assert(offCode <= MaxOff); optPtr->offCodeFreq[offCode]++; @@ -382,9 +406,11 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) /* Update hashTable3 up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ -static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* const ip) +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_MatchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip) { U32* const hashTable3 = ms->hashTable3; U32 const hashLog3 = ms->hashLog3; @@ -411,8 +437,10 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, * @param ip assumed <= iend-8 . 
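The "emulated offset cost" comment added to ZSTD_getMatchPrice() above corresponds to a small closed formula. A self-contained sketch of the zop_predef branch only, assuming MINMATCH == 3 and the fractional weight function used when optLevel >= 2; all names are invented:

static unsigned highbit_sk(unsigned v) { unsigned h = 0; while (v >>= 1) h++; return h; }

static unsigned weight_sk(unsigned stat)   /* ZSTD_fracWeight-alike, see the earlier sketch */
{
    unsigned const s  = stat + 1;
    unsigned const hb = highbit_sk(s);
    return (hb << 8) + ((s << 8) >> hb);
}

/* Before any statistics exist: weight the match length, and charge the offset
 * as its bit length plus a flat 16 "bits", everything scaled by 256. */
static unsigned predef_match_price_sketch(unsigned offBase, unsigned matchLength /* >= 3 */)
{
    unsigned const mlBase  = matchLength - 3;        /* MINMATCH assumed 3 */
    unsigned const offCode = highbit_sk(offBase);    /* offBase: storeSeq() sumtype */
    return weight_sk(mlBase) + ((16 + offCode) << 8);
}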
* @param target The target of ZSTD_updateTree_internal() - we are filling to this position * @return : nb of positions added */ -static U32 ZSTD_insertBt1( - const ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_insertBt1( + const ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, U32 const target, U32 const mls, const int extDict) @@ -530,15 +558,16 @@ static U32 ZSTD_insertBt1( } FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_updateTree_internal( - ZSTD_matchState_t* ms, + ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, const U32 mls, const ZSTD_dictMode_e dictMode) { const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); U32 idx = ms->nextToUpdate; - DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)", + DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)", idx, target, dictMode); while(idx < target) { @@ -551,20 +580,23 @@ void ZSTD_updateTree_internal( ms->nextToUpdate = target; } -void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { +void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend) { ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); } FORCE_INLINE_TEMPLATE -U32 ZSTD_insertBtAndGetAllMatches ( - ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ - ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode, - const U32 rep[ZSTD_REP_NUM], - U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ - const U32 lengthToBeat, - U32 const mls /* template */) +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 +ZSTD_insertBtAndGetAllMatches ( + ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ + ZSTD_MatchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip, const BYTE* const iLimit, + const ZSTD_dictMode_e dictMode, + const U32 rep[ZSTD_REP_NUM], + const U32 ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ + const U32 lengthToBeat, + const U32 mls /* template */) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); @@ -593,7 +625,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( U32 mnum = 0; U32 nbCompares = 1U << cParams->searchLog; - const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; + const ZSTD_MatchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; const ZSTD_compressionParameters* const dmsCParams = dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? 
dms->window.base : NULL; @@ -632,13 +664,13 @@ U32 ZSTD_insertBtAndGetAllMatches ( assert(curr >= windowLow); if ( dictMode == ZSTD_extDict && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */ - & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */) + & (ZSTD_index_overlap_check(dictLimit, repIndex)) ) && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch; } if (dictMode == ZSTD_dictMatchState && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */ - & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */ + & (ZSTD_index_overlap_check(dictLimit, repIndex)) ) && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch; } } @@ -787,7 +819,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( typedef U32 (*ZSTD_getAllMatchesFn)( ZSTD_match_t*, - ZSTD_matchState_t*, + ZSTD_MatchState_t*, U32*, const BYTE*, const BYTE*, @@ -795,9 +827,11 @@ typedef U32 (*ZSTD_getAllMatchesFn)( U32 const ll0, U32 const lengthToBeat); -FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal( +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_btGetAllMatches_internal( ZSTD_match_t* matches, - ZSTD_matchState_t* ms, + ZSTD_MatchState_t* ms, U32* nextToUpdate3, const BYTE* ip, const BYTE* const iHighLimit, @@ -820,7 +854,7 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal( #define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \ static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \ ZSTD_match_t* matches, \ - ZSTD_matchState_t* ms, \ + ZSTD_MatchState_t* ms, \ U32* nextToUpdate3, \ const BYTE* ip, \ const BYTE* const iHighLimit, \ @@ -852,7 +886,7 @@ GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState) } static ZSTD_getAllMatchesFn -ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode) +ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t const* ms, ZSTD_dictMode_e const dictMode) { ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = { ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict), @@ -871,7 +905,7 @@ ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const di /* Struct containing info needed to make decision about ldm inclusion */ typedef struct { - rawSeqStore_t seqStore; /* External match candidates store for this block */ + RawSeqStore_t seqStore; /* External match candidates store for this block */ U32 startPosInBlock; /* Start position of the current match candidate */ U32 endPosInBlock; /* End position of the current match candidate */ U32 offset; /* Offset of the match candidate */ @@ -881,7 +915,7 @@ typedef struct { * Moves forward in @rawSeqStore by @nbBytes, * which will update the fields 'pos' and 'posInSequence'. 
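Both repcode validations above now go through ZSTD_index_overlap_check(), whose definition is not shown in this diff. Assuming it simply wraps the historical expression it replaces, the trick is an intentional unsigned underflow:

#include <stdint.h>

/* Nonzero when repIndex is usable. The subtraction rejects the three positions
 * just below dictLimit, where reading the first match bytes would straddle the
 * extDict/prefix boundary; for repIndex >= dictLimit it wraps to a huge value,
 * so in-prefix positions always pass. (Assumption: mirrors the replaced
 * `(U32)((dictLimit-1) - repIndex) >= 3` test.) */
static int index_overlap_check_sketch(uint32_t dictLimit, uint32_t repIndex)
{
    return (uint32_t)((dictLimit - 1) - repIndex) >= 3;
}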
*/ -static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) +static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) { U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); while (currPos && rawSeqStore->pos < rawSeqStore->size) { @@ -938,7 +972,7 @@ ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock return; } - /* Matches may be < MINMATCH by this process. In that case, we will reject them + /* Matches may be < minMatch by this process. In that case, we will reject them when we are deciding whether or not to add the ldm */ optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining; optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining; @@ -960,7 +994,8 @@ ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock * into 'matches'. Maintains the correct ordering of 'matches'. */ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, - const ZSTD_optLdm_t* optLdm, U32 currPosInBlock) + const ZSTD_optLdm_t* optLdm, U32 currPosInBlock, + U32 minMatch) { U32 const posDiff = currPosInBlock - optLdm->startPosInBlock; /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ @@ -969,7 +1004,7 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, /* Ensure that current block position is not outside of the match */ if (currPosInBlock < optLdm->startPosInBlock || currPosInBlock >= optLdm->endPosInBlock - || candidateMatchLength < MINMATCH) { + || candidateMatchLength < minMatch) { return; } @@ -989,7 +1024,8 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches, - U32 currPosInBlock, U32 remainingBytes) + U32 currPosInBlock, U32 remainingBytes, + U32 minMatch) { if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { return; @@ -1006,7 +1042,7 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, } ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); } - ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock); + ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch); } @@ -1014,11 +1050,6 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, * Optimal parser *********************************/ -static U32 ZSTD_totalLen(ZSTD_optimal_t sol) -{ - return sol.litlen + sol.mlen; -} - #if 0 /* debug */ static void @@ -1036,9 +1067,15 @@ listStats(const U32* table, int lastEltID) #endif -FORCE_INLINE_TEMPLATE size_t -ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, - seqStore_t* seqStore, +#define LIT_PRICE(_p) (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel) +#define LL_PRICE(_l) (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel) +#define LL_INCPRICE(_l) (LL_PRICE(_l) - LL_PRICE(_l-1)) + +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t +ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const int optLevel, @@ -1062,9 +1099,11 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, ZSTD_optimal_t* const opt = optStatePtr->priceTable; ZSTD_match_t* const matches = optStatePtr->matchTable; - ZSTD_optimal_t lastSequence; + ZSTD_optimal_t lastStretch; ZSTD_optLdm_t optLdm; + ZSTD_memset(&lastStretch, 0, sizeof(ZSTD_optimal_t)); + 
optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore; optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip)); @@ -1085,19 +1124,32 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, U32 const ll0 = !litlen; U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch); ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, - (U32)(ip-istart), (U32)(iend - ip)); - if (!nbMatches) { ip++; continue; } + (U32)(ip-istart), (U32)(iend-ip), + minMatch); + if (!nbMatches) { + DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart)); + ip++; + continue; + } + + /* Match found: let's store this solution, and possibly find more candidates. + * During this forward pass, @opt is used to store stretches, + * defined as "a match followed by N literals". + * Note how this is different from a Sequence, which is "N literals followed by a match". + * Storing stretches allows us to store different match predecessors + * for each literal position that is part of a literals run. */ /* initialize opt[0] */ - { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } - opt[0].mlen = 0; /* means is_a_literal */ + opt[0].mlen = 0; /* there are only literals so far */ opt[0].litlen = litlen; - opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); + opt[0].price = LL_PRICE(litlen); + ZSTD_memcpy(&opt[0].rep, rep, sizeof(opt[0].rep)); /* large match -> immediate encoding */ { U32 const maxML = matches[nbMatches-1].len; @@ -1106,82 +1158,106 @@ nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart)); if (maxML > sufficient_len) { - lastSequence.litlen = litlen; - lastSequence.mlen = maxML; - lastSequence.off = maxOffBase; - DEBUGLOG(6, "large match (%u>%u), immediate encoding", + lastStretch.litlen = 0; + lastStretch.mlen = maxML; + lastStretch.off = maxOffBase; + DEBUGLOG(6, "large match (%u>%u) => immediate encoding", maxML, sufficient_len); cur = 0; - last_pos = ZSTD_totalLen(lastSequence); + last_pos = maxML; goto _shortestPath; } } /* set prices for first matches starting position == 0 */ assert(opt[0].price >= 0); - { U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); - U32 pos; + { U32 pos; U32 matchNb; for (pos = 1; pos < minMatch; pos++) { - opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */ + opt[pos].price = ZSTD_MAX_PRICE; + opt[pos].mlen = 0; + opt[pos].litlen = litlen + pos; } for (matchNb = 0; matchNb < nbMatches; matchNb++) { U32 const offBase = matches[matchNb].off; U32 const end = matches[matchNb].len; for ( ; pos <= end ; pos++ ) { - U32 const matchPrice = ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel); - U32 const sequencePrice = literalsPrice + matchPrice; + int const matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel); + int const sequencePrice = opt[0].price + matchPrice; DEBUGLOG(7, "rPos:%u => set initial price : %.2f", pos, ZSTD_fCost(sequencePrice)); opt[pos].mlen = pos; opt[pos].off = offBase; - opt[pos].litlen = litlen; - opt[pos].price = (int)sequencePrice; - } } + opt[pos].litlen = 0; /* end of match */ + opt[pos].price = sequencePrice + LL_PRICE(0); + } + } last_pos = pos-1; + opt[pos].price = ZSTD_MAX_PRICE; } } /* check further positions */ for (cur = 1; cur <= last_pos; cur++) { const BYTE* const inr = ip + cur; - assert(cur < ZSTD_OPT_NUM); - DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur) + assert(cur <= ZSTD_OPT_NUM); + DEBUGLOG(7, "cPos:%i==rPos:%u", (int)(inr-istart), cur); /* Fix current position with one literal if cheaper */ - { U32 const litlen = (opt[cur-1].mlen == 0) ?
opt[cur-1].litlen + 1 : 1; + { U32 const litlen = opt[cur-1].litlen + 1; int const price = opt[cur-1].price - + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) - + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); + + LIT_PRICE(ip+cur-1) + + LL_INCPRICE(litlen); assert(price < 1000000000); /* overflow check */ if (price <= opt[cur].price) { - DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", - inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, + ZSTD_optimal_t const prevMatch = opt[cur]; + DEBUGLOG(7, "cPos:%i==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", + (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]); - opt[cur].mlen = 0; - opt[cur].off = 0; + opt[cur] = opt[cur-1]; opt[cur].litlen = litlen; opt[cur].price = price; + if ( (optLevel >= 1) /* additional check only for higher modes */ + && (prevMatch.litlen == 0) /* replace a match */ + && (LL_INCPRICE(1) < 0) /* ll1 is cheaper than ll0 */ + && LIKELY(ip + cur < iend) + ) { + /* check next position, in case it would be cheaper */ + int with1literal = prevMatch.price + LIT_PRICE(ip+cur) + LL_INCPRICE(1); + int withMoreLiterals = price + LIT_PRICE(ip+cur) + LL_INCPRICE(litlen+1); + DEBUGLOG(7, "then at next rPos %u : match+1lit %.2f vs %ulits %.2f", + cur+1, ZSTD_fCost(with1literal), litlen+1, ZSTD_fCost(withMoreLiterals)); + if ( (with1literal < withMoreLiterals) + && (with1literal < opt[cur+1].price) ) { + /* update offset history - before it disappears */ + U32 const prev = cur - prevMatch.mlen; + Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0); + assert(cur >= prevMatch.mlen); + DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !", + ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals), + newReps.rep[0], newReps.rep[1], newReps.rep[2] ); + opt[cur+1] = prevMatch; /* mlen & offbase */ + ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(Repcodes_t)); + opt[cur+1].litlen = 1; + opt[cur+1].price = with1literal; + if (last_pos < cur+1) last_pos = cur+1; + } + } } else { - DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)", - inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), - opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]); + DEBUGLOG(7, "cPos:%i==rPos:%u : literal would cost more (%.2f>%.2f)", + (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price)); } } - /* Set the repcodes of the current position. We must do it here - * because we rely on the repcodes of the 2nd to last sequence being - * correct to set the next chunks repcodes during the backward - * traversal. + /* Offset history is not updated during match comparison. + * Do it here, now that the match is selected and confirmed. 
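The hunk above is the heart of the stretch-based forward pass: position cur may inherit opt[cur-1] plus one literal, priced incrementally through LL_INCPRICE. A compilable distillation with the price helpers abstracted into parameters (the match+1literal lookahead from the same hunk is omitted for brevity; names are invented):

typedef struct { int price; unsigned litlen, mlen, off; unsigned rep[3]; } opt_sketch;

/* Extend the best path at cur-1 by one literal if that beats the current best
 * at cur. Copying the whole predecessor keeps its match fields and repcode
 * history attached to the growing literals run. */
static void extend_with_literal_sketch(opt_sketch* opt, unsigned cur,
                                       int litPrice,    /* LIT_PRICE(ip+cur-1) */
                                       int llIncPrice)  /* LL_INCPRICE(new litlen) */
{
    unsigned const litlen = opt[cur-1].litlen + 1;
    int const price = opt[cur-1].price + litPrice + llIncPrice;
    if (price <= opt[cur].price) {
        opt[cur] = opt[cur-1];          /* inherit predecessor, incl. off/mlen/rep */
        opt[cur].litlen = litlen;
        opt[cur].price  = price;
    }
}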
*/ - ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t)); + ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(Repcodes_t)); assert(cur >= opt[cur].mlen); - if (opt[cur].mlen != 0) { + if (opt[cur].litlen == 0) { + /* just finished a match => alter offset history */ U32 const prev = cur - opt[cur].mlen; - repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0); - ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t)); - } else { - ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t)); + Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0); + ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(Repcodes_t)); } /* last match must start at a minimum distance of 8 from oend */ @@ -1191,38 +1267,37 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, if ( (optLevel==0) /*static_test*/ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) { - DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1); + DEBUGLOG(7, "skip current position : next rPos(%u) price is cheaper", cur+1); continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ } assert(opt[cur].price >= 0); - { U32 const ll0 = (opt[cur].mlen != 0); - U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0; - U32 const previousPrice = (U32)opt[cur].price; - U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel); + { U32 const ll0 = (opt[cur].litlen == 0); + int const previousPrice = opt[cur].price; + int const basePrice = previousPrice + LL_PRICE(0); U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch); U32 matchNb; ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, - (U32)(inr-istart), (U32)(iend-inr)); + (U32)(inr-istart), (U32)(iend-inr), + minMatch); if (!nbMatches) { DEBUGLOG(7, "rPos:%u : no match found", cur); continue; } - { U32 const maxML = matches[nbMatches-1].len; - DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u", - inr-istart, cur, nbMatches, maxML); - - if ( (maxML > sufficient_len) - || (cur + maxML >= ZSTD_OPT_NUM) ) { - lastSequence.mlen = maxML; - lastSequence.off = matches[nbMatches-1].off; - lastSequence.litlen = litlen; - cur -= (opt[cur].mlen==0) ? 
opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */ - last_pos = cur + ZSTD_totalLen(lastSequence); - if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */ + { U32 const longestML = matches[nbMatches-1].len; + DEBUGLOG(7, "cPos:%i==rPos:%u, found %u matches, of longest ML=%u", + (int)(inr-istart), cur, nbMatches, longestML); + + if ( (longestML > sufficient_len) + || (cur + longestML >= ZSTD_OPT_NUM) + || (ip + cur + longestML >= iend) ) { + lastStretch.mlen = longestML; + lastStretch.off = matches[nbMatches-1].off; + lastStretch.litlen = 0; + last_pos = cur + longestML; goto _shortestPath; } } @@ -1234,19 +1309,24 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, U32 mlen; DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u", - matchNb, matches[matchNb].off, lastML, litlen); + matchNb, matches[matchNb].off, lastML, opt[cur].litlen); for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ U32 const pos = cur + mlen; - int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + int const price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); if ((pos > last_pos) || (price < opt[pos].price)) { DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)", pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); - while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */ + while (last_pos < pos) { + /* fill empty positions, for future comparisons */ + last_pos++; + opt[last_pos].price = ZSTD_MAX_PRICE; + opt[last_pos].litlen = !0; /* just needs to be != 0, to mean "not an end of match" */ + } opt[pos].mlen = mlen; opt[pos].off = offset; - opt[pos].litlen = litlen; + opt[pos].litlen = 0; opt[pos].price = price; } else { DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)", @@ -1254,55 +1334,81 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */ } } } } + opt[last_pos+1].price = ZSTD_MAX_PRICE; } /* for (cur = 1; cur <= last_pos; cur++) */ - lastSequence = opt[last_pos]; - cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */ - assert(cur < ZSTD_OPT_NUM); /* control overflow*/ + lastStretch = opt[last_pos]; + assert(cur >= lastStretch.mlen); + cur = last_pos - lastStretch.mlen; _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ assert(opt[0].mlen == 0); + assert(last_pos >= lastStretch.mlen); + assert(cur == last_pos - lastStretch.mlen); - /* Set the next chunk's repcodes based on the repcodes of the beginning - * of the last match, and the last sequence. This avoids us having to - * update them while traversing the sequences. 
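The downward match-length scan above initialises every freshly reached position before prices can be compared; note the litlen = !0 sentinel from the hunk, meaning "not an end of match". The same loop with zstd types swapped for a local struct and the price function abstracted away (illustrative only):

#define MAX_PRICE_SKETCH (1 << 30)

typedef struct { int price; unsigned litlen, mlen, off; } opt_sketch;

static void scan_lengths_sketch(opt_sketch* opt, unsigned* last_pos,
                                unsigned cur, unsigned startML, unsigned lastML,
                                unsigned offBase, int basePrice,
                                int (*matchPrice)(unsigned offBase, unsigned mlen))
{
    unsigned mlen;
    for (mlen = lastML; mlen >= startML; mlen--) {      /* scan downward */
        unsigned const pos = cur + mlen;
        int const price = basePrice + matchPrice(offBase, mlen);
        while (*last_pos < pos) {                       /* init fresh slots for comparison */
            ++*last_pos;
            opt[*last_pos].price  = MAX_PRICE_SKETCH;
            opt[*last_pos].litlen = 1;                  /* any nonzero: not an end of match */
        }
        if (price < opt[pos].price) {
            opt[pos].mlen   = mlen;
            opt[pos].off    = offBase;
            opt[pos].litlen = 0;                        /* this position ends on a match */
            opt[pos].price  = price;
        }
    }
}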
- */ - if (lastSequence.mlen != 0) { - repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0); - ZSTD_memcpy(rep, &reps, sizeof(reps)); + if (lastStretch.mlen==0) { + /* no solution : all matches have been converted into literals */ + assert(lastStretch.litlen == (ip - anchor) + last_pos); + ip += last_pos; + continue; + } + assert(lastStretch.off > 0); + + /* Update offset history */ + if (lastStretch.litlen == 0) { + /* finishing on a match : update offset history */ + Repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0); + ZSTD_memcpy(rep, &reps, sizeof(Repcodes_t)); } else { - ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t)); + ZSTD_memcpy(rep, lastStretch.rep, sizeof(Repcodes_t)); + assert(cur >= lastStretch.litlen); + cur -= lastStretch.litlen; } - { U32 const storeEnd = cur + 1; + /* Let's write the shortest path solution. + * It is stored in @opt in reverse order, + * starting from @storeEnd (==cur+2), + * effectively overwriting part of @opt. + * Content is changed too: + * - So far, @opt stored stretches, aka a match followed by literals + * - Now, it will store sequences, aka literals followed by a match + */ + { U32 const storeEnd = cur + 2; U32 storeStart = storeEnd; - U32 seqPos = cur; + U32 stretchPos = cur; DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)", last_pos, cur); (void)last_pos; - assert(storeEnd < ZSTD_OPT_NUM); - DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", - storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off); - opt[storeEnd] = lastSequence; - while (seqPos > 0) { - U32 const backDist = ZSTD_totalLen(opt[seqPos]); + assert(storeEnd < ZSTD_OPT_SIZE); + DEBUGLOG(6, "last stretch copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", + storeEnd, lastStretch.litlen, lastStretch.mlen, lastStretch.off); + opt[storeEnd] = lastStretch; /* note: litlen will be fixed */ + storeStart = storeEnd; + while (1) { + ZSTD_optimal_t nextStretch = opt[stretchPos]; + opt[storeStart].litlen = nextStretch.litlen; + DEBUGLOG(6, "selected sequence (llen=%u,mlen=%u,ofc=%u)", + opt[storeStart].litlen, opt[storeStart].mlen, opt[storeStart].off); + if (nextStretch.mlen == 0) { + /* reaching beginning of segment */ + break; + } storeStart--; - DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", - seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off); - opt[storeStart] = opt[seqPos]; - seqPos = (seqPos > backDist) ?
seqPos - backDist : 0; + opt[storeStart] = nextStretch; /* note: litlen will be fixed */ + assert(nextStretch.litlen + nextStretch.mlen <= stretchPos); + stretchPos -= nextStretch.litlen + nextStretch.mlen; } /* save sequences */ - DEBUGLOG(6, "sending selected sequences into seqStore") + DEBUGLOG(6, "sending selected sequences into seqStore"); { U32 storePos; for (storePos=storeStart; storePos <= storeEnd; storePos++) { U32 const llen = opt[storePos].litlen; U32 const mlen = opt[storePos].mlen; U32 const offBase = opt[storePos].off; U32 const advance = llen + mlen; - DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", - anchor - istart, (unsigned)llen, (unsigned)mlen); + DEBUGLOG(6, "considering seq starting at %i, llen=%u, mlen=%u", + (int)(anchor - istart), (unsigned)llen, (unsigned)mlen); if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */ assert(storePos == storeEnd); /* must be last sequence */ @@ -1316,6 +1422,9 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, anchor += advance; ip = anchor; } } + DEBUGLOG(7, "new offset history : %u, %u, %u", rep[0], rep[1], rep[2]); + + /* update all costs */ ZSTD_setBasePrices(optStatePtr, optLevel); } } /* while (ip < ilimit) */ @@ -1323,42 +1432,51 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, /* Return the last literals size */ return (size_t)(iend - anchor); } +#endif /* build exclusions */ +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR static size_t ZSTD_compressBlock_opt0( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode); } +#endif +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR static size_t ZSTD_compressBlock_opt2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode); } +#endif +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btopt"); return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } +#endif +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. * only works on first block, with no dictionary and no ldm. - * this function cannot error, hence its contract must be respected. + * this function cannot error out, its narrow contract must be respected. 
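The rewritten reverse traversal converts stretches ("a match, then N literals") back into sequences ("N literals, then a match") while reusing the tail of opt[] as scratch. A standalone copy of that loop with a local struct, kept line-for-line close to the hunk:

typedef struct { int price; unsigned litlen, mlen, off; } opt_sketch;

/* On entry, cur is the end position of the stretch preceding lastStretch, and
 * storeEnd (cur + 2 in the hunk) is free scratch at the tail of opt[].
 * Returns storeStart; sequences then occupy opt[storeStart..storeEnd]. */
static unsigned stretches_to_sequences_sketch(opt_sketch* opt, unsigned cur,
                                              opt_sketch lastStretch, unsigned storeEnd)
{
    unsigned storeStart = storeEnd;
    unsigned stretchPos = cur;
    opt[storeEnd] = lastStretch;                /* litlen fixed on the first iteration */
    for (;;) {
        opt_sketch const next = opt[stretchPos];
        opt[storeStart].litlen = next.litlen;   /* literals migrate to the match after them */
        if (next.mlen == 0) break;              /* reached the beginning of the segment */
        storeStart--;
        opt[storeStart] = next;                 /* its own litlen is fixed next iteration */
        stretchPos -= next.litlen + next.mlen;  /* jump to the predecessor stretch */
    }
    return storeStart;
}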
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR static size_t ZSTD_compressBlock_opt0( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode); } +#endif +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR static size_t ZSTD_compressBlock_opt2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode); } +#endif +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btopt"); return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } +#endif +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. * only works on first block, with no dictionary and no ldm. - * this function cannot error, hence its contract must be respected. + * this function cannot error out, so its narrow contract must be respected. */ -static void -ZSTD_initStats_ultra(ZSTD_matchState_t* ms, - seqStore_t* seqStore, - U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) { U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep)); @@ -1371,7 +1489,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/ - /* invalidate first scan from history */ + /* invalidate first scan from history, only keep entropy stats */ ZSTD_resetSeqStore(seqStore); ms->window.base -= srcSize; ms->window.dictLimit += (U32)srcSize; @@ -1381,7 +1499,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, } size_t ZSTD_compressBlock_btultra( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize); @@ -1389,16 +1507,16 @@ size_t ZSTD_compressBlock_btultra( } size_t ZSTD_compressBlock_btultra2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { U32 const curr = (U32)((const BYTE*)src - ms->window.base); DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); - /* 2-pass strategy: + /* Two-pass strategy: * this strategy makes a first pass over first block to collect statistics - * and seed next round's statistics with it. - * After 1st pass, function forgets everything, and starts a new block. + * in order to seed next round's statistics with it. + * After 1st pass, function forgets history, and starts a new block. * Consequently, this can only work if no data has been previously loaded in tables, * aka, no dictionary, no prefix, no ldm preprocessing.
* The compression ratio gain is generally small (~0.5% on first block), @@ -1407,42 +1525,47 @@ size_t ZSTD_compressBlock_btultra2( if ( (ms->opt.litLengthSum==0) /* first block */ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ - && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ - && (srcSize > ZSTD_PREDEF_THRESHOLD) + && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ + && (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */ ) { ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } +#endif
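The gating above makes the first-block special case concrete. In outline, the two-pass flow looks like the following sketch (hypothetical helper names used purely for illustration; the real work is done by ZSTD_initStats_ultra and ZSTD_compressBlock_opt2 as shown in the hunks above):

    /* Schematic view of btultra2 on an eligible first block:
     * pass 1 compresses only to populate entropy statistics, then the
     * window and seqStore are rewound so that pass 2 re-compresses the
     * same input from a clean position history, with realistic prices. */
    size_t compressFirstBlock(State* s, const void* src, size_t srcSize)
    {
        if (firstBlock(s) && noDictionary(s) && noLdm(s)
            && srcSize > PREDEF_THRESHOLD) {
            compressPass(s, src, srcSize);  /* side effect: fills s->stats */
            rewindHistory(s, srcSize);      /* drop matches, keep s->stats */
        }
        return compressPass(s, src, srcSize); /* the pass that is kept */
    }
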
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); } -size_t ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict); } +#endif -size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btultra_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_btultra_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict); } +#endif /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries diff --git a/lib/compress/zstd_opt.h b/lib/compress/zstd_opt.h index 627255f53de..756c7b1d0c5 100644 --- a/lib/compress/zstd_opt.h +++ b/lib/compress/zstd_opt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the @@ -11,46 +11,62 @@ #ifndef ZSTD_OPT_H #define ZSTD_OPT_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "zstd_compress_internal.h" +#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) /* used in ZSTD_loadDictionaryContent() */ -void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend); +void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend); +#endif +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btultra( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_dictMatchState( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btultra2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_extDict( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); +#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt +#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE ZSTD_compressBlock_btopt_dictMatchState +#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT ZSTD_compressBlock_btopt_extDict +#else +#define ZSTD_COMPRESSBLOCK_BTOPT NULL +#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT NULL +#endif -size_t ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btultra( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries * and is only specific for the first block (no prefix) */ +size_t ZSTD_compressBlock_btultra2( + ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); -#if defined (__cplusplus) -} +#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra +#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState +#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict +#define ZSTD_COMPRESSBLOCK_BTULTRA2 ZSTD_compressBlock_btultra2 +#else +#define ZSTD_COMPRESSBLOCK_BTULTRA NULL +#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT NULL +#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL #endif #endif /* ZSTD_OPT_H */ diff --git a/lib/compress/zstd_preSplit.c
b/lib/compress/zstd_preSplit.c new file mode 100644 index 00000000000..d820c20ac24 --- /dev/null +++ b/lib/compress/zstd_preSplit.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "../common/compiler.h" /* ZSTD_ALIGNOF */ +#include "../common/mem.h" /* S64 */ +#include "../common/zstd_deps.h" /* ZSTD_memset */ +#include "../common/zstd_internal.h" /* ZSTD_STATIC_ASSERT */ +#include "hist.h" /* HIST_add */ +#include "zstd_preSplit.h" + + +#define BLOCKSIZE_MIN 3500 +#define THRESHOLD_PENALTY_RATE 16 +#define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2) +#define THRESHOLD_PENALTY 3 + +#define HASHLENGTH 2 +#define HASHLOG_MAX 10 +#define HASHTABLESIZE (1 << HASHLOG_MAX) +#define HASHMASK (HASHTABLESIZE - 1) +#define KNUTH 0x9e3779b9 + +/* for hashLog > 8, hash 2 bytes. + * for hashLog == 8, just take the byte, no hashing. + * The speed of this method relies on compile-time constant propagation */ +FORCE_INLINE_TEMPLATE unsigned hash2(const void *p, unsigned hashLog) +{ + assert(hashLog >= 8); + if (hashLog == 8) return (U32)((const BYTE*)p)[0]; + assert(hashLog <= HASHLOG_MAX); + return (U32)(MEM_read16(p)) * KNUTH >> (32 - hashLog); +} + + +typedef struct { + unsigned events[HASHTABLESIZE]; + size_t nbEvents; +} Fingerprint; typedef struct { + Fingerprint pastEvents; + Fingerprint newEvents; +} FPStats; + +static void initStats(FPStats* fpstats) +{ + ZSTD_memset(fpstats, 0, sizeof(FPStats)); +} + +FORCE_INLINE_TEMPLATE void +addEvents_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog) +{ + const char* p = (const char*)src; + size_t limit = srcSize - HASHLENGTH + 1; + size_t n; + assert(srcSize >= HASHLENGTH); + for (n = 0; n < limit; n+=samplingRate) { + fp->events[hash2(p+n, hashLog)]++; + } + fp->nbEvents += limit/samplingRate; +} + +FORCE_INLINE_TEMPLATE void +recordFingerprint_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog) +{ + ZSTD_memset(fp, 0, sizeof(unsigned) * ((size_t)1 << hashLog)); + fp->nbEvents = 0; + addEvents_generic(fp, src, srcSize, samplingRate, hashLog); +} + +typedef void (*RecordEvents_f)(Fingerprint* fp, const void* src, size_t srcSize); + +#define FP_RECORD(_rate) ZSTD_recordFingerprint_##_rate + +#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize) \ + static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \ + { \ + recordFingerprint_generic(fp, src, srcSize, _rate, _hSize); \ + } + +ZSTD_GEN_RECORD_FINGERPRINT(1, 10) +ZSTD_GEN_RECORD_FINGERPRINT(5, 10) +ZSTD_GEN_RECORD_FINGERPRINT(11, 9) +ZSTD_GEN_RECORD_FINGERPRINT(43, 8)
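The sampling hash above compresses each 2-byte window into a small bucket index: multiply by the Knuth golden-ratio constant so the input bits diffuse into the top of the 32-bit product, then keep only the top hashLog bits. A standalone rendering of the same computation (assuming a little-endian target, which is also what the fast path's MEM_read16 effectively reads):

    #include <stdint.h>
    #include <string.h>

    /* Same math as hash2() above: 16-bit read, multiply by 0x9e3779b9,
     * keep the top hashLog bits (9 or 10 here; hashLog == 8 bypasses
     * hashing entirely and uses the raw byte instead). */
    static unsigned hash2_standalone(const void* p, unsigned hashLog)
    {
        uint16_t v;
        memcpy(&v, p, sizeof v);   /* byte order as on little-endian */
        return (unsigned)(((uint32_t)v * 0x9e3779b9u) >> (32 - hashLog));
    }
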
+ + +static U64 abs64(S64 s64) { return (U64)((s64 < 0) ? -s64 : s64); } + +static U64 fpDistance(const Fingerprint* fp1, const Fingerprint* fp2, unsigned hashLog) +{ + U64 distance = 0; + size_t n; + assert(hashLog <= HASHLOG_MAX); + for (n = 0; n < ((size_t)1 << hashLog); n++) { + distance += + abs64((S64)fp1->events[n] * (S64)fp2->nbEvents - (S64)fp2->events[n] * (S64)fp1->nbEvents); + } + return distance; +} + +/* Compare newEvents with pastEvents + * return 1 when considered "too different" + */ +static int compareFingerprints(const Fingerprint* ref, + const Fingerprint* newfp, + int penalty, + unsigned hashLog) +{ + assert(ref->nbEvents > 0); + assert(newfp->nbEvents > 0); + { U64 p50 = (U64)ref->nbEvents * (U64)newfp->nbEvents; + U64 deviation = fpDistance(ref, newfp, hashLog); + U64 threshold = p50 * (U64)(THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE; + return deviation >= threshold; + } +} + +static void mergeEvents(Fingerprint* acc, const Fingerprint* newfp) +{ + size_t n; + for (n = 0; n < HASHTABLESIZE; n++) { + acc->events[n] += newfp->events[n]; + } + acc->nbEvents += newfp->nbEvents; +} + +static void flushEvents(FPStats* fpstats) +{ + size_t n; + for (n = 0; n < HASHTABLESIZE; n++) { + fpstats->pastEvents.events[n] = fpstats->newEvents.events[n]; + } + fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents; + ZSTD_memset(&fpstats->newEvents, 0, sizeof(fpstats->newEvents)); +} + +static void removeEvents(Fingerprint* acc, const Fingerprint* slice) +{ + size_t n; + for (n = 0; n < HASHTABLESIZE; n++) { + assert(acc->events[n] >= slice->events[n]); + acc->events[n] -= slice->events[n]; + } + acc->nbEvents -= slice->nbEvents; +} + +#define CHUNKSIZE (8 << 10) +static size_t ZSTD_splitBlock_byChunks(const void* blockStart, size_t blockSize, + int level, + void* workspace, size_t wkspSize) +{ + static const RecordEvents_f records_fs[] = { + FP_RECORD(43), FP_RECORD(11), FP_RECORD(5), FP_RECORD(1) + }; + static const unsigned hashParams[] = { 8, 9, 10, 10 }; + const RecordEvents_f record_f = (assert(0<=level && level<=3), records_fs[level]); + FPStats* const fpstats = (FPStats*)workspace; + const char* p = (const char*)blockStart; + int penalty = THRESHOLD_PENALTY; + size_t pos = 0; + assert(blockSize == (128 << 10)); + assert(workspace != NULL); + assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0); + ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats)); + assert(wkspSize >= sizeof(FPStats)); (void)wkspSize; + + initStats(fpstats); + record_f(&fpstats->pastEvents, p, CHUNKSIZE); + for (pos = CHUNKSIZE; pos <= blockSize - CHUNKSIZE; pos += CHUNKSIZE) { + record_f(&fpstats->newEvents, p + pos, CHUNKSIZE); + if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level])) { + return pos; + } else { + mergeEvents(&fpstats->pastEvents, &fpstats->newEvents); + if (penalty > 0) penalty--; + } + } + assert(pos == blockSize); + return blockSize; + (void)flushEvents; (void)removeEvents; +}
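Dividing both sides of the comparison in compareFingerprints() by ref->nbEvents * newfp->nbEvents shows what the integer code is really testing: split when

    sum_n | p_ref(n) - p_new(n) |  >=  (THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE

where p(n) = events[n] / nbEvents are the two normalized histograms. The L1 distance on the left ranges over [0, 2], and with THRESHOLD_BASE == 14 the bar starts at 17/16 (about 1.06) for the first chunk (penalty == 3) and relaxes to 14/16 == 0.875 as consecutive chunks keep matching and the penalty decays. The cross-multiplied form in fpDistance() keeps the whole test in exact 64-bit integer arithmetic, with no division or floating point.
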
+ +/* ZSTD_splitBlock_fromBorders(): very fast strategy : + * compare fingerprint from beginning and end of the block, + * derive from their difference if it's preferable to split in the middle, + * repeat the process a second time, for a finer-grained decision. + * 3 times did not bring improvements, so I stopped at 2. + * Benefits are good enough for a cheap heuristic. + * More accurate splitting saves more, but its speed impact is also more perceptible. + * For better accuracy, use the more elaborate variant *_byChunks. + */ +static size_t ZSTD_splitBlock_fromBorders(const void* blockStart, size_t blockSize, + void* workspace, size_t wkspSize) +{ +#define SEGMENT_SIZE 512 + FPStats* const fpstats = (FPStats*)workspace; + Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned)); + assert(blockSize == (128 << 10)); + assert(workspace != NULL); + assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0); + ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats)); + assert(wkspSize >= sizeof(FPStats)); (void)wkspSize; + + initStats(fpstats); + HIST_add(fpstats->pastEvents.events, blockStart, SEGMENT_SIZE); + HIST_add(fpstats->newEvents.events, (const char*)blockStart + blockSize - SEGMENT_SIZE, SEGMENT_SIZE); + fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = SEGMENT_SIZE; + if (!compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8)) + return blockSize; + + HIST_add(middleEvents->events, (const char*)blockStart + blockSize/2 - SEGMENT_SIZE/2, SEGMENT_SIZE); + middleEvents->nbEvents = SEGMENT_SIZE; + { U64 const distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8); + U64 const distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8); + U64 const minDistance = SEGMENT_SIZE * SEGMENT_SIZE / 3; + if (abs64((S64)distFromBegin - (S64)distFromEnd) < minDistance) + return 64 KB; + return (distFromBegin > distFromEnd) ? 32 KB : 96 KB; + } +} + +size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize, + int level, + void* workspace, size_t wkspSize) +{ + DEBUGLOG(6, "ZSTD_splitBlock (level=%i)", level); + assert(0<=level && level<=4); + if (level == 0) + return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize); + /* level >= 1 */ + return ZSTD_splitBlock_byChunks(blockStart, blockSize, level-1, workspace, wkspSize); +} diff --git a/lib/compress/zstd_preSplit.h b/lib/compress/zstd_preSplit.h new file mode 100644 index 00000000000..b89a200dccd --- /dev/null +++ b/lib/compress/zstd_preSplit.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_PRESPLIT_H +#define ZSTD_PRESPLIT_H + +#include <stddef.h> /* size_t */ + +#define ZSTD_SLIPBLOCK_WORKSPACESIZE 8208 + +/* ZSTD_splitBlock(): + * @level must be a value between 0 and 4. + * higher levels spend more energy to detect block boundaries. + * @workspace must be aligned for size_t. + * @wkspSize must be >= ZSTD_SLIPBLOCK_WORKSPACESIZE + * note: + * For the time being, this function only accepts full 128 KB blocks. + * Therefore, @blockSize must be == 128 KB. + * While this could be extended to smaller sizes in the future, + * it is not yet clear if this would be useful. TBD. + */ +size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize, + int level, + void* workspace, size_t wkspSize); + +#endif /* ZSTD_PRESPLIT_H */
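Putting the pieces together, a caller drives the splitter as in the following minimal sketch; the 128 KB restriction and the alignment requirement come straight from the header comment and assertions above, and a size_t array satisfies the documented alignment:

    #include "zstd_preSplit.h"

    /* Ask for the first split point inside one full 128 KB block.
     * Returns a split position, or blockSize when the block looks
     * homogeneous enough to keep whole. Level 0 is the cheapest
     * (borders-only) heuristic, 4 the most thorough. */
    static size_t firstSplit(const void* block128k, int level /* 0..4 */)
    {
        size_t workspace[ZSTD_SLIPBLOCK_WORKSPACESIZE / sizeof(size_t) + 1];
        return ZSTD_splitBlock(block128k, 128 * 1024, level,
                               workspace, sizeof(workspace));
    }
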
diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c index 0c10eb603df..cb710475c2a 100644 --- a/lib/compress/zstdmt_compress.c +++ b/lib/compress/zstdmt_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the @@ -15,16 +15,13 @@ #endif -/* ====== Constants ====== */ -#define ZSTDMT_OVERLAPLOG_DEFAULT 0 - - /* ====== Dependencies ====== */ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */ #include "../common/mem.h" /* MEM_STATIC */ #include "../common/pool.h" /* threadpool */ #include "../common/threading.h" /* mutex */ -#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ +#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ #include "zstd_ldm.h" #include "zstdmt_compress.h" @@ -43,12 +40,13 @@ # include <unistd.h> # include <sys/times.h> -# define DEBUG_PRINTHEX(l,p,n) { \ - unsigned debug_u; \ - for (debug_u=0; debug_u<(n); debug_u++) \ - RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ - RAWLOG(l, " \n"); \ -} +# define DEBUG_PRINTHEX(l,p,n) \ + do { \ + unsigned debug_u; \ + for (debug_u=0; debug_u<(n); debug_u++) \ + RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ + RAWLOG(l, " \n"); \ + } while (0) static unsigned long long GetCurrentClockTimeMicroseconds(void) { @@ -60,25 +58,28 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void) } } #define MUTEX_WAIT_TIME_DLEVEL 6 -#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \ - if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ - unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ - ZSTD_pthread_mutex_lock(mutex); \ - { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ - unsigned long long const elapsedTime = (afterTime-beforeTime); \ - if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ - DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ - elapsedTime, #mutex); \ - } } \ - } else { \ - ZSTD_pthread_mutex_lock(mutex); \ - } \ -} +#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) \ + do { \ + if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ + unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ + ZSTD_pthread_mutex_lock(mutex); \ + { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ + unsigned long long const elapsedTime = (afterTime-beforeTime); \ + if (elapsedTime > 1000) { \ + /* or whatever threshold you like; I'm using 1 millisecond here */ \ + DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, \ + "Thread took %llu microseconds to acquire mutex %s \n", \ + elapsedTime, #mutex); \ + } } \ + } else { \ + ZSTD_pthread_mutex_lock(mutex); \ + } \ + } while (0) #else # define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) -# define DEBUG_PRINTHEX(l,p,n) {} +# define DEBUG_PRINTHEX(l,p,n) do { } while (0) #endif
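The conversion of these brace-block macros to do { ... } while (0) is the standard fix for the dangling-semicolon hazard: a bare { ... } followed by the caller's ; is two statements, which breaks if/else chaining, while the do/while form absorbs the semicolon and behaves as exactly one statement. A minimal illustration (toy macros, not from this file):

    #include <stdio.h>

    static int nbLogs;
    #define LOG_BRACES(s)  { puts(s); ++nbLogs; }
    #define LOG_SAFE(s)    do { puts(s); ++nbLogs; } while (0)

    void report(int err)
    {
        /* if (err) LOG_BRACES("fail"); else puts("ok");
         *   -> expands to "{ ... } ;" : the stray ';' detaches the
         *      'else', and the function no longer compiles. */
        if (err)
            LOG_SAFE("fail");   /* one statement; the ';' terminates it */
        else
            puts("ok");
    }
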
@@ -89,9 +90,9 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void) typedef struct buffer_s { void* start; size_t capacity; -} buffer_t; +} Buffer; -static const buffer_t g_nullBuffer = { NULL, 0 }; +static const Buffer g_nullBuffer = { NULL, 0 }; typedef struct ZSTDMT_bufferPool_s { ZSTD_pthread_mutex_t poolMutex; @@ -99,18 +100,39 @@ typedef struct ZSTDMT_bufferPool_s { unsigned totalBuffers; unsigned nbBuffers; ZSTD_customMem cMem; - buffer_t bTable[1]; /* variable size */ + Buffer* buffers; } ZSTDMT_bufferPool; +static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) +{ + DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); + if (!bufPool) return; /* compatibility with free on NULL */ + if (bufPool->buffers) { + unsigned u; + for (u=0; u<bufPool->totalBuffers; u++) { + DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->buffers[u].start); + ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem); + } + ZSTD_customFree(bufPool->buffers, bufPool->cMem); + } + ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); + ZSTD_customFree(bufPool, bufPool->cMem); +} + static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem) { - ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc( - sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem); + ZSTDMT_bufferPool* const bufPool = + (ZSTDMT_bufferPool*)ZSTD_customCalloc(sizeof(ZSTDMT_bufferPool), cMem); if (bufPool==NULL) return NULL; if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { ZSTD_customFree(bufPool, cMem); return NULL; } + bufPool->buffers = (Buffer*)ZSTD_customCalloc(maxNbBuffers * sizeof(Buffer), cMem); + if (bufPool->buffers==NULL) { + ZSTDMT_freeBufferPool(bufPool); + return NULL; + } bufPool->bufferSize = 64 KB; bufPool->totalBuffers = maxNbBuffers; bufPool->nbBuffers = 0; @@ -118,32 +140,19 @@ static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_cu return bufPool; } -static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) -{ - unsigned u; - DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); - if (!bufPool) return; /* compatibility with free on NULL */ - for (u=0; u<bufPool->totalBuffers; u++) { - DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start); - ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem); - } - ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); - ZSTD_customFree(bufPool, bufPool->cMem); -} - /* only works at initialization, not during compression */ static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) { - size_t const poolSize = sizeof(*bufPool) - + (bufPool->totalBuffers - 1) * sizeof(buffer_t); + size_t const poolSize = sizeof(*bufPool); + size_t const arraySize = bufPool->totalBuffers * sizeof(Buffer); unsigned u; size_t totalBufferSize = 0; ZSTD_pthread_mutex_lock(&bufPool->poolMutex); for (u=0; u<bufPool->totalBuffers; u++) - totalBufferSize += bufPool->bTable[u].capacity; + totalBufferSize += bufPool->buffers[u].capacity; ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - return poolSize + totalBufferSize; + return poolSize + arraySize + totalBufferSize; } /* ZSTDMT_setBufferSize() : @@ -180,15 +189,15 @@ static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, * assumption : bufPool must be valid * @return : a buffer, with start pointer and size * note: allocation may fail, in this case, start==NULL and size==0 */ -static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) +static Buffer ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) { size_t const bSize = bufPool->bufferSize; DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize); ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers) { /* try to use an existing buffer */ - buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)]; + Buffer const buf = bufPool->buffers[--(bufPool->nbBuffers)]; size_t const availBufferSize = buf.capacity; - bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer; + bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer; if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) { /* large enough, but not too much */ DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u", @@ -203,7 +212,7 @@
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); /* create new buffer */ DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer"); - { buffer_t buffer; + { Buffer buffer; void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); buffer.start = start; /* note : start can be NULL if malloc fails ! */ buffer.capacity = (start==NULL) ? 0 : bSize; @@ -222,12 +231,12 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) * @return : a buffer that is at least the buffer pool buffer size. * If a reallocation happens, the data in the input buffer is copied. */ -static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) +static Buffer ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, Buffer buffer) { size_t const bSize = bufPool->bufferSize; if (buffer.capacity < bSize) { void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); - buffer_t newBuffer; + Buffer newBuffer; newBuffer.start = start; newBuffer.capacity = start == NULL ? 0 : bSize; if (start != NULL) { @@ -243,20 +252,20 @@ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) #endif /* store buffer for later re-use, up to pool capacity */ -static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) +static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, Buffer buf) { DEBUGLOG(5, "ZSTDMT_releaseBuffer"); if (buf.start == NULL) return; /* compatible with release on NULL */ ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers < bufPool->totalBuffers) { - bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */ + bufPool->buffers[bufPool->nbBuffers++] = buf; /* stored for later use */ DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u", (U32)buf.capacity, (U32)(bufPool->nbBuffers-1)); ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return; } ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - /* Reached bufferPool capacity (should not happen) */ + /* Reached bufferPool capacity (note: should not happen) */ DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing "); ZSTD_customFree(buf.start, bufPool->cMem); } @@ -281,23 +290,23 @@ static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool) return ZSTDMT_sizeof_bufferPool(seqPool); } -static rawSeqStore_t bufferToSeq(buffer_t buffer) +static RawSeqStore_t bufferToSeq(Buffer buffer) { - rawSeqStore_t seq = kNullRawSeqStore; + RawSeqStore_t seq = kNullRawSeqStore; seq.seq = (rawSeq*)buffer.start; seq.capacity = buffer.capacity / sizeof(rawSeq); return seq; } -static buffer_t seqToBuffer(rawSeqStore_t seq) +static Buffer seqToBuffer(RawSeqStore_t seq) { - buffer_t buffer; + Buffer buffer; buffer.start = seq.seq; buffer.capacity = seq.capacity * sizeof(rawSeq); return buffer; } -static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) +static RawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) { if (seqPool->bufferSize == 0) { return kNullRawSeqStore; @@ -306,13 +315,13 @@ static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) } #if ZSTD_RESIZE_SEQPOOL -static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) +static RawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq) { return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq))); } #endif -static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) +static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq) { ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); } 
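Both pools above expose the same borrow/return discipline to the workers; the typical call pattern, schematically, is:

    /* Borrow a scratch buffer, use it, hand it back. ZSTDMT_getBuffer()
     * can fail: it then returns { NULL, 0 }, so callers test buf.start.
     * ZSTDMT_releaseBuffer() re-shelves the buffer for reuse, or frees
     * it outright when the pool is already at capacity. */
    Buffer buf = ZSTDMT_getBuffer(bufPool);
    if (buf.start == NULL) return ERROR(memory_allocation);
    /* ... write up to buf.capacity bytes at buf.start ... */
    ZSTDMT_releaseBuffer(bufPool, buf);
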
@@ -349,16 +358,20 @@ typedef struct { int totalCCtx; int availCCtx; ZSTD_customMem cMem; - ZSTD_CCtx* cctx[1]; /* variable size */ + ZSTD_CCtx** cctxs; } ZSTDMT_CCtxPool; -/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ +/* note : all CCtx borrowed from the pool must be returned to the pool _before_ freeing the pool */ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) { - int cid; - for (cid=0; cid<pool->totalCCtx; cid++) - ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */ + if (!pool) return; ZSTD_pthread_mutex_destroy(&pool->poolMutex); + if (pool->cctxs) { + int cid; + for (cid=0; cid<pool->totalCCtx; cid++) + ZSTD_freeCCtx(pool->cctxs[cid]); /* free compatible with NULL */ + ZSTD_customFree(pool->cctxs, pool->cMem); + } ZSTD_customFree(pool, pool->cMem); } @@ -367,19 +380,24 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, ZSTD_customMem cMem) { - ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc( - sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem); + ZSTDMT_CCtxPool* const cctxPool = + (ZSTDMT_CCtxPool*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtxPool), cMem); assert(nbWorkers > 0); if (!cctxPool) return NULL; if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { ZSTD_customFree(cctxPool, cMem); return NULL; } - cctxPool->cMem = cMem; cctxPool->totalCCtx = nbWorkers; + cctxPool->cctxs = (ZSTD_CCtx**)ZSTD_customCalloc(nbWorkers * sizeof(ZSTD_CCtx*), cMem); + if (!cctxPool->cctxs) { + ZSTDMT_freeCCtxPool(cctxPool); + return NULL; + } + cctxPool->cMem = cMem; + cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem); + if (!cctxPool->cctxs[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ - cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem); - if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers); return cctxPool; } @@ -401,16 +419,16 @@ static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) { ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); { unsigned const nbWorkers = cctxPool->totalCCtx; - size_t const poolSize = sizeof(*cctxPool) - + (nbWorkers-1) * sizeof(ZSTD_CCtx*); - unsigned u; + size_t const poolSize = sizeof(*cctxPool); + size_t const arraySize = cctxPool->totalCCtx * sizeof(ZSTD_CCtx*); size_t totalCCtxSize = 0; + unsigned u; for (u=0; u<nbWorkers; u++) { - totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]); + totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]); } ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); assert(nbWorkers > 0); - return poolSize + totalCCtxSize; + return poolSize + arraySize + totalCCtxSize; } } @@ -420,7 +438,7 @@ static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); if (cctxPool->availCCtx) { cctxPool->availCCtx--; - { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx]; + { ZSTD_CCtx* const cctx = cctxPool->cctxs[cctxPool->availCCtx]; ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); return cctx; } } @@ -434,7 +452,7 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) if (cctx==NULL) return; /* compatibility with release on NULL */ ZSTD_pthread_mutex_lock(&pool->poolMutex); if (pool->availCCtx < pool->totalCCtx) - pool->cctx[pool->availCCtx++] = cctx; + pool->cctxs[pool->availCCtx++] = cctx; else { /* pool overflow : should not happen, since totalCCtx==nbWorkers */ DEBUGLOG(4, "CCtx pool
overflow : free cctx"); @@ -448,7 +466,7 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) typedef struct { void const* start; size_t size; -} range_t; +} Range; typedef struct { /* All variables in the struct are protected by mutex. */ @@ -464,10 +482,10 @@ typedef struct { ZSTD_pthread_mutex_t ldmWindowMutex; ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */ ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */ -} serialState_t; +} SerialState; static int -ZSTDMT_serialState_reset(serialState_t* serialState, +ZSTDMT_serialState_reset(SerialState* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize, @@ -537,7 +555,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState, return 0; } -static int ZSTDMT_serialState_init(serialState_t* serialState) +static int ZSTDMT_serialState_init(SerialState* serialState) { int initError = 0; ZSTD_memset(serialState, 0, sizeof(*serialState)); @@ -548,7 +566,7 @@ static int ZSTDMT_serialState_init(serialState_t* serialState) return initError; } -static void ZSTDMT_serialState_free(serialState_t* serialState) +static void ZSTDMT_serialState_free(SerialState* serialState) { ZSTD_customMem cMem = serialState->params.customMem; ZSTD_pthread_mutex_destroy(&serialState->mutex); @@ -559,9 +577,10 @@ static void ZSTDMT_serialState_free(serialState_t* serialState) ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); } -static void ZSTDMT_serialState_update(serialState_t* serialState, - ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore, - range_t src, unsigned jobID) +static void +ZSTDMT_serialState_genSequences(SerialState* serialState, + RawSeqStore_t* seqStore, + Range src, unsigned jobID) { /* Wait for our turn */ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); @@ -574,12 +593,13 @@ static void ZSTDMT_serialState_update(serialState_t* serialState, /* It is now our turn, do any processing necessary */ if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) { size_t error; - assert(seqStore.seq != NULL && seqStore.pos == 0 && - seqStore.size == 0 && seqStore.capacity > 0); + DEBUGLOG(6, "ZSTDMT_serialState_genSequences: LDM update"); + assert(seqStore->seq != NULL && seqStore->pos == 0 && + seqStore->size == 0 && seqStore->capacity > 0); assert(src.size <= serialState->params.jobSize); ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0); error = ZSTD_ldm_generateSequences( - &serialState->ldmState, &seqStore, + &serialState->ldmState, seqStore, &serialState->params.ldmParams, src.start, src.size); /* We provide a large enough buffer to never fail. 
*/ assert(!ZSTD_isError(error)); (void)error; @@ -598,17 +618,22 @@ static void ZSTDMT_serialState_update(serialState_t* serialState, serialState->nextJobID++; ZSTD_pthread_cond_broadcast(&serialState->cond); ZSTD_pthread_mutex_unlock(&serialState->mutex); +} - if (seqStore.size > 0) { - size_t const err = ZSTD_referenceExternalSequences( - jobCCtx, seqStore.seq, seqStore.size); - assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); - assert(!ZSTD_isError(err)); - (void)err; +static void +ZSTDMT_serialState_applySequences(const SerialState* serialState, /* just for an assert() check */ + ZSTD_CCtx* jobCCtx, + const RawSeqStore_t* seqStore) +{ + if (seqStore->size > 0) { + DEBUGLOG(5, "ZSTDMT_serialState_applySequences: uploading %u external sequences", (unsigned)seqStore->size); + assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); (void)serialState; + assert(jobCCtx); + ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size); } } -static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, +static void ZSTDMT_serialState_ensureFinished(SerialState* serialState, unsigned jobID, size_t cSize) { ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); @@ -632,36 +657,37 @@ static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, /* ===== Worker thread ===== */ /* ------------------------------------------ */ -static const range_t kNullRange = { NULL, 0 }; +static const Range kNullRange = { NULL, 0 }; typedef struct { - size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ - size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ - ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ - ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ - ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ - ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ - ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ - serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */ - buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ - range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ - range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */ - unsigned jobID; /* set by mtctx, then read by worker => no barrier */ - unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ - unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ - ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ - const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ - unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ - size_t dstFlushed; /* used only by mtctx */ - unsigned frameChecksumNeeded; /* used only by mtctx */ + size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ + size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ + ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ + ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ + ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ + ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ + 
ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ + SerialState* serial; /* Thread-safe - used by mtctx and (all) workers */ + Buffer dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ + Range prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ + Range src; /* set by mtctx, then read by worker & mtctx => no barrier */ + unsigned jobID; /* set by mtctx, then read by worker => no barrier */ + unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ + unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ + ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ + const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ + unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ + size_t dstFlushed; /* used only by mtctx */ + unsigned frameChecksumNeeded; /* used only by mtctx */ } ZSTDMT_jobDescription; -#define JOB_ERROR(e) { \ - ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ - job->cSize = e; \ - ZSTD_pthread_mutex_unlock(&job->job_mutex); \ - goto _endJob; \ -} +#define JOB_ERROR(e) \ + do { \ + ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ + job->cSize = e; \ + ZSTD_pthread_mutex_unlock(&job->job_mutex); \ + goto _endJob; \ + } while (0) /* ZSTDMT_compressionJob() is a POOL_function type */ static void ZSTDMT_compressionJob(void* jobDescription) @@ -669,10 +695,11 @@ static void ZSTDMT_compressionJob(void* jobDescription) ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */ ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool); - rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); - buffer_t dstBuff = job->dstBuff; + RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); + Buffer dstBuff = job->dstBuff; size_t lastCBlockSize = 0; + DEBUGLOG(5, "ZSTDMT_compressionJob: job %u", job->jobID); /* resources */ if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation)); if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */ @@ -694,11 +721,15 @@ static void ZSTDMT_compressionJob(void* jobDescription) /* init */ + + /* Perform serial step as early as possible */ + ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID); + if (job->cdict) { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); assert(job->firstJob); /* only allowed for first job */ if (ZSTD_isError(initError)) JOB_ERROR(initError); - } else { /* srcStart points at reloaded section */ + } else { U64 const pledgedSrcSize = job->firstJob ? 
job->fullFrameSize : job->src.size; { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob); if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError); @@ -707,25 +738,26 @@ static void ZSTDMT_compressionJob(void* jobDescription) size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0); if (ZSTD_isError(err)) JOB_ERROR(err); } + DEBUGLOG(6, "ZSTDMT_compressionJob: job %u: loading prefix of size %zu", job->jobID, job->prefix.size); { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, - job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */ + job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, ZSTD_dtlm_fast, NULL, /*cdict*/ &jobParams, pledgedSrcSize); if (ZSTD_isError(initError)) JOB_ERROR(initError); } } - /* Perform serial step as early as possible, but after CCtx initialization */ - ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID); + /* External Sequences can only be applied after CCtx initialization */ + ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore); if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ - size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); + size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); if (ZSTD_isError(hSize)) JOB_ERROR(hSize); DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); ZSTD_invalidateRepCodes(cctx); } - /* compress */ + /* compress the entire job by smaller chunks, for better granularity */ { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX; int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize); const BYTE* ip = (const BYTE*) job->src.start; @@ -737,7 +769,7 @@ static void ZSTDMT_compressionJob(void* jobDescription) DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); assert(job->cSize == 0); for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { - size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize); + size_t const cSize = ZSTD_compressContinue_public(cctx, op, oend-op, ip, chunkSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); ip += chunkSize; op += cSize; assert(op < oend); @@ -757,8 +789,8 @@ static void ZSTDMT_compressionJob(void* jobDescription) size_t const lastBlockSize1 = job->src.size & (chunkSize-1); size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; size_t const cSize = (job->lastJob) ? - ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) : - ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize); + ZSTD_compressEnd_public(cctx, op, oend-op, ip, lastBlockSize) : + ZSTD_compressContinue_public(cctx, op, oend-op, ip, lastBlockSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); lastCBlockSize = cSize; } } @@ -793,10 +825,10 @@ static void ZSTDMT_compressionJob(void* jobDescription) /* ------------------------------------------ */ typedef struct { - range_t prefix; /* read-only non-owned prefix buffer */ - buffer_t buffer; + Range prefix; /* read-only non-owned prefix buffer */ + Buffer buffer; size_t filled; -} inBuff_t; +} InBuff_t; typedef struct { BYTE* buffer; /* The round input buffer. 
All jobs get references @@ -810,9 +842,9 @@ typedef struct { * the inBuff is sent to the worker thread. * pos <= capacity. */ -} roundBuff_t; +} RoundBuff_t; -static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; +static const RoundBuff_t kNullRoundBuff = {NULL, 0, 0}; #define RSYNC_LENGTH 32 /* Don't create chunks smaller than the zstd block size. @@ -829,7 +861,7 @@ typedef struct { U64 hash; U64 hitMask; U64 primePower; -} rsyncState_t; +} RSyncState_t; struct ZSTDMT_CCtx_s { POOL_ctx* factory; @@ -841,10 +873,10 @@ struct ZSTDMT_CCtx_s { size_t targetSectionSize; size_t targetPrefixSize; int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */ - inBuff_t inBuff; - roundBuff_t roundBuff; - serialState_t serial; - rsyncState_t rsync; + InBuff_t inBuff; + RoundBuff_t roundBuff; + SerialState serial; + RSyncState_t rsync; unsigned jobIDMask; unsigned doneJobID; unsigned nextJobID; @@ -976,18 +1008,20 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) { unsigned jobID; DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); - for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { - /* Copy the mutex/cond out */ - ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex; - ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond; - - DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); - ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); - - /* Clear the job description, but keep the mutex/cond */ - ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID])); - mtctx->jobs[jobID].job_mutex = mutex; - mtctx->jobs[jobID].job_cond = cond; + if (mtctx->jobs) { + for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { + /* Copy the mutex/cond out */ + ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex; + ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond; + + DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); + ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); + + /* Clear the job description, but keep the mutex/cond */ + ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID])); + mtctx->jobs[jobID].job_mutex = mutex; + mtctx->jobs[jobID].job_cond = cond; + } } mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; @@ -1090,7 +1124,7 @@ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx) { unsigned jobNb; unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1); DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)", - mtctx->doneJobID, lastJobNb, mtctx->jobReady) + mtctx->doneJobID, lastJobNb, mtctx->jobReady); for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) { unsigned const wJobID = jobNb & mtctx->jobIDMask; ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; @@ -1229,30 +1263,26 @@ size_t ZSTDMT_initCStream_internal( /* init */ if (params.nbWorkers != mtctx->params.nbWorkers) - FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , ""); + FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, (unsigned)params.nbWorkers) , ""); if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN; if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX; - DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers); - if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */ 
ZSTDMT_waitForAllJobsCompleted(mtctx); - ZSTDMT_releaseAllJobResources(mtctx); - mtctx->allJobsCompleted = 1; + ZSTDMT_releaseAllJobResources(mtctx); /* Will set allJobsCompleted to 1 */ } mtctx->params = params; mtctx->frameContentSize = pledgedSrcSize; + ZSTD_freeCDict(mtctx->cdictLocal); if (dict) { - ZSTD_freeCDict(mtctx->cdictLocal); mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */ params.cParams, mtctx->cMem); mtctx->cdict = mtctx->cdictLocal; if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); } else { - ZSTD_freeCDict(mtctx->cdictLocal); mtctx->cdictLocal = NULL; mtctx->cdict = cdict; } @@ -1318,9 +1348,32 @@ size_t ZSTDMT_initCStream_internal( mtctx->allJobsCompleted = 0; mtctx->consumed = 0; mtctx->produced = 0; + + /* update dictionary */ + ZSTD_freeCDict(mtctx->cdictLocal); + mtctx->cdictLocal = NULL; + mtctx->cdict = NULL; + if (dict) { + if (dictContentType == ZSTD_dct_rawContent) { + mtctx->inBuff.prefix.start = (const BYTE*)dict; + mtctx->inBuff.prefix.size = dictSize; + } else { + /* note : a loadPrefix becomes an internal CDict */ + mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, + ZSTD_dlm_byRef, dictContentType, + params.cParams, mtctx->cMem); + mtctx->cdict = mtctx->cdictLocal; + if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); + } + } else { + mtctx->cdict = cdict; + } + if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize, dict, dictSize, dictContentType)) return ERROR(memory_allocation); + + return 0; } @@ -1387,7 +1440,7 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZS mtctx->roundBuff.pos += srcSize; mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; - /* Set the prefix */ + /* Set the prefix for next job */ if (!endFrame) { size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize); mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; @@ -1524,12 +1577,17 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u * If the data of the first job is broken up into two segments, we cover both * sections. */ -static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) +static Range ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) { unsigned const firstJobID = mtctx->doneJobID; unsigned const lastJobID = mtctx->nextJobID; unsigned jobID; + /* no need to check during first round */ + size_t roundBuffCapacity = mtctx->roundBuff.capacity; + size_t nbJobs1stRoundMin = roundBuffCapacity / mtctx->targetSectionSize; + if (lastJobID < nbJobs1stRoundMin) return kNullRange; + for (jobID = firstJobID; jobID < lastJobID; ++jobID) { unsigned const wJobID = jobID & mtctx->jobIDMask; size_t consumed; @@ -1539,7 +1597,7 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); if (consumed < mtctx->jobs[wJobID].src.size) { - range_t range = mtctx->jobs[wJobID].prefix; + Range range = mtctx->jobs[wJobID].prefix; if (range.size == 0) { /* Empty prefix */ range = mtctx->jobs[wJobID].src; @@ -1555,7 +1613,7 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) /** * Returns non-zero iff buffer and range overlap. 
*/ -static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) +static int ZSTDMT_isOverlapped(Buffer buffer, Range range) { BYTE const* const bufferStart = (BYTE const*)buffer.start; BYTE const* const rangeStart = (BYTE const*)range.start; @@ -1575,10 +1633,10 @@ static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) } } -static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) +static int ZSTDMT_doesOverlapWindow(Buffer buffer, ZSTD_window_t window) { - range_t extDict; - range_t prefix; + Range extDict; + Range prefix; DEBUGLOG(5, "ZSTDMT_doesOverlapWindow"); extDict.start = window.dictBase + window.lowLimit; @@ -1597,7 +1655,7 @@ static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) || ZSTDMT_isOverlapped(buffer, prefix); } -static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) +static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, Buffer buffer) { if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; @@ -1622,16 +1680,16 @@ static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) */ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) { - range_t const inUse = ZSTDMT_getInputDataInUse(mtctx); + Range const inUse = ZSTDMT_getInputDataInUse(mtctx); size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; - size_t const target = mtctx->targetSectionSize; - buffer_t buffer; + size_t const spaceNeeded = mtctx->targetSectionSize; + Buffer buffer; DEBUGLOG(5, "ZSTDMT_tryGetInputRange"); assert(mtctx->inBuff.buffer.start == NULL); - assert(mtctx->roundBuff.capacity >= target); + assert(mtctx->roundBuff.capacity >= spaceNeeded); - if (spaceLeft < target) { + if (spaceLeft < spaceNeeded) { /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. * Simply copy the prefix to the beginning in that case. */ @@ -1650,7 +1708,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) mtctx->roundBuff.pos = prefixSize; } buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; - buffer.capacity = target; + buffer.capacity = spaceNeeded; if (ZSTDMT_isOverlapped(buffer, inUse)) { DEBUGLOG(5, "Waiting for buffer..."); @@ -1677,7 +1735,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) typedef struct { size_t toLoad; /* The number of bytes to load from the input. */ int flush; /* Boolean declaring if we must flush because we found a synchronization point. */ -} syncPoint_t; +} SyncPoint; /** * Searches through the input for a synchronization point. If one is found, we @@ -1685,14 +1743,14 @@ typedef struct { * Otherwise, we will load as many bytes as possible and instruct the caller * to continue as normal. 
*/ -static syncPoint_t +static SyncPoint findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) { BYTE const* const istart = (BYTE const*)input.src + input.pos; U64 const primePower = mtctx->rsync.primePower; U64 const hitMask = mtctx->rsync.hitMask; - syncPoint_t syncPoint; + SyncPoint syncPoint; U64 hash; BYTE const* prev; size_t pos; @@ -1824,7 +1882,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start); } if (mtctx->inBuff.buffer.start != NULL) { - syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input); + SyncPoint const syncPoint = findSynchronizationPoint(mtctx, *input); if (syncPoint.flush && endOp == ZSTD_e_continue) { endOp = ZSTD_e_flush; } diff --git a/lib/compress/zstdmt_compress.h b/lib/compress/zstdmt_compress.h index 271eb1ac71f..91b489b9cb4 100644 --- a/lib/compress/zstdmt_compress.h +++ b/lib/compress/zstdmt_compress.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,10 +11,10 @@ #ifndef ZSTDMT_COMPRESS_H #define ZSTDMT_COMPRESS_H - #if defined (__cplusplus) - extern "C" { - #endif - +/* === Dependencies === */ +#include "../common/zstd_deps.h" /* size_t */ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ +#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ /* Note : This is an internal API. * These APIs used to be exposed with ZSTDLIB_API, @@ -25,12 +25,6 @@ * otherwise ZSTDMT_createCCtx*() will fail. */ -/* === Dependencies === */ -#include "../common/zstd_deps.h" /* size_t */ -#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ -#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ - - /* === Constants === */ #ifndef ZSTDMT_NBWORKERS_MAX /* a different value can be selected at compile time */ # define ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256) @@ -105,9 +99,4 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p */ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx); - -#if defined (__cplusplus) -} -#endif - #endif /* ZSTDMT_COMPRESS_H */ diff --git a/lib/decompress/huf_decompress.c b/lib/decompress/huf_decompress.c index c6fd9286006..92020617824 100644 --- a/lib/decompress/huf_decompress.c +++ b/lib/decompress/huf_decompress.c @@ -1,7 +1,7 @@ /* ****************************************************************** * huff0 huffman decoder, * part of Finite State Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. 
* * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -15,11 +15,11 @@ /* ************************************************************** * Dependencies ****************************************************************/ +#include <stddef.h> /* size_t */ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */ #include "../common/compiler.h" #include "../common/bitstream.h" /* BIT_* */ #include "../common/fse.h" /* to compress headers */ -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" #include "../common/zstd_internal.h" @@ -35,6 +35,12 @@ * Macros ****************************************************************/ +#ifdef HUF_DISABLE_FAST_DECODE +# define HUF_ENABLE_FAST_DECODE 0 +#else +# define HUF_ENABLE_FAST_DECODE 1 +#endif + /* These two optional macros force the use one way or another of the two * Huffman decompression implementations. You can't force in both directions * at the same time. @@ -44,10 +50,14 @@ #error "Cannot force the use of the X1 and X2 decoders at the same time!" #endif -#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2 -# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE +/* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is + * supported at runtime, so we can add the BMI2 target attribute. + * When it is disabled, we will still get BMI2 if it is enabled statically. + */ +#if DYNAMIC_BMI2 +# define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE #else -# define HUF_ASM_X86_64_BMI2_ATTRS +# define HUF_FAST_BMI2_ATTRS #endif #ifdef __cplusplus @@ -57,18 +67,12 @@ #endif #define HUF_ASM_DECL HUF_EXTERN_C -#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) +#if DYNAMIC_BMI2 # define HUF_NEED_BMI2_FUNCTION 1 #else # define HUF_NEED_BMI2_FUNCTION 0 #endif -#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) -# define HUF_NEED_DEFAULT_FUNCTION 1 -#else -# define HUF_NEED_DEFAULT_FUNCTION 0 -#endif - /* ************************************************************** * Error Management ****************************************************************/ @@ -85,6 +89,11 @@ /* ************************************************************** * BMI2 Variant Wrappers ****************************************************************/ +typedef size_t (*HUF_DecompressUsingDTableFn)(void *dst, size_t dstSize, + const void *cSrc, + size_t cSrcSize, + const HUF_DTable *DTable); + #if DYNAMIC_BMI2 #define HUF_DGEN(fn) \ @@ -106,9 +115,9 @@ } \ \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + size_t cSrcSize, HUF_DTable const* DTable, int flags) \ { \ - if (bmi2) { \ + if (flags & HUF_flags_bmi2) { \ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ @@ -118,9 +127,9 @@ #define HUF_DGEN(fn) \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + size_t cSrcSize, HUF_DTable const* DTable, int flags) \ { \ - (void)bmi2; \ + (void)flags; \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } @@ -139,43 +148,66 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) return dtd; } -#if ZSTD_ENABLE_ASM_X86_64_BMI2 - -static size_t HUF_initDStream(BYTE const* ip) { +static size_t HUF_initFastDStream(BYTE const* ip) { BYTE const lastByte = ip[7]; size_t const bitsConsumed = lastByte ? 
8 - ZSTD_highbit32(lastByte) : 0; size_t const value = MEM_readLEST(ip) | 1; assert(bitsConsumed <= 8); + assert(sizeof(size_t) == 8); return value << bitsConsumed; } + + +/** + * The input/output arguments to the Huffman fast decoding loop: + * + * ip [in/out] - The input pointers, must be updated to reflect what is consumed. + * op [in/out] - The output pointers, must be updated to reflect what is written. + * bits [in/out] - The bitstream containers, must be updated to reflect the current state. + * dt [in] - The decoding table. + * ilowest [in] - The beginning of the valid range of the input. Decoders may read + * down to this pointer. It may be below iend[0]. + * oend [in] - The end of the output stream. op[3] must not cross oend. + * iend [in] - The end of each input stream. ip[i] may cross iend[i], + * as long as it is above ilowest, but that indicates corruption. + */ typedef struct { BYTE const* ip[4]; BYTE* op[4]; U64 bits[4]; void const* dt; - BYTE const* ilimit; + BYTE const* ilowest; BYTE* oend; BYTE const* iend[4]; -} HUF_DecompressAsmArgs; +} HUF_DecompressFastArgs; + +typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*); /** - * Initializes args for the asm decoding loop. - * @returns 0 on success - * 1 if the fallback implementation should be used. + * Initializes args for the fast decoding loop. + * @returns 1 on success + * 0 if the fallback implementation should be used. * Or an error code on failure. */ -static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) +static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) { void const* dt = DTable + 1; U32 const dtLog = HUF_getDTableDesc(DTable).tableLog; - const BYTE* const ilimit = (const BYTE*)src + 6 + 8; + const BYTE* const istart = (const BYTE*)src; + + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize); - BYTE* const oend = (BYTE*)dst + dstSize; + /* The fast decoding loop assumes 64-bit little-endian. + * This condition is false on x32. + */ + if (!MEM_isLittleEndian() || MEM_32bits()) + return 0; - /* The following condition is false on x32 platform, - * but HUF_asm is not compatible with this ABI */ - if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1; + /* Avoid nullptr addition */ + if (dstSize == 0) + return 0; + assert(dst != NULL); /* strict minimum : jump table + 1 byte per stream */ if (srcSize < 10) @@ -186,11 +218,10 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder. */ if (dtLog != HUF_DECODER_FAST_TABLELOG) - return 1; + return 0; /* Read the jump table. */ { - const BYTE* const istart = (const BYTE*)src; size_t const length1 = MEM_readLE16(istart); size_t const length2 = MEM_readLE16(istart+2); size_t const length3 = MEM_readLE16(istart+4); @@ -200,13 +231,11 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, args->iend[2] = args->iend[1] + length2; args->iend[3] = args->iend[2] + length3; - /* HUF_initDStream() requires this, and this small of an input + /* HUF_initFastDStream() requires this, and this small of an input * won't benefit from the ASM loop anyways. - * length1 must be >= 16 so that ip[0] >= ilimit before the loop - * starts. 
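The framing being validated here is compact: a 6-byte jump table records the sizes of the first three bitstreams, and the fourth stream takes whatever remains. A hedged sketch of that layout computation, with readLE16 and splitStreams as illustrative stand-ins:

    #include <stddef.h>
    #include <stdint.h>

    static size_t readLE16(const uint8_t* p) { return (size_t)p[0] | ((size_t)p[1] << 8); }

    /* Splits src into the 4 stream boundaries. Returns 0 on success, nonzero
     * when the encoded lengths overflow srcSize (the subtraction wraps, so
     * length4 > srcSize catches it, as in the diff). */
    static int splitStreams(const uint8_t* src, size_t srcSize, const uint8_t* bound[4])
    {
        size_t length1, length2, length3, length4;
        if (srcSize < 10) return 1;          /* jump table + 1 byte per stream */
        length1 = readLE16(src);
        length2 = readLE16(src + 2);
        length3 = readLE16(src + 4);
        length4 = srcSize - (length1 + length2 + length3 + 6);
        bound[0] = src + 6;                  /* stream 1 begins after the table */
        bound[1] = bound[0] + length1;
        bound[2] = bound[1] + length2;
        bound[3] = bound[2] + length3;
        return length4 > srcSize;
    }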
*/ - if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8) - return 1; + if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8) + return 0; if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */ } /* ip[] contains the position that is currently loaded into bits[]. */ @@ -223,7 +252,7 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, /* No point to call the ASM loop for tiny outputs. */ if (args->op[3] >= oend) - return 1; + return 0; /* bits[] is the bit container. * It is read from the MSB down to the LSB. @@ -232,24 +261,25 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, * set, so that CountTrailingZeros(bits[]) can be used * to count how many bits we've consumed. */ - args->bits[0] = HUF_initDStream(args->ip[0]); - args->bits[1] = HUF_initDStream(args->ip[1]); - args->bits[2] = HUF_initDStream(args->ip[2]); - args->bits[3] = HUF_initDStream(args->ip[3]); - - /* If ip[] >= ilimit, it is guaranteed to be safe to - * reload bits[]. It may be beyond its section, but is - * guaranteed to be valid (>= istart). - */ - args->ilimit = ilimit; + args->bits[0] = HUF_initFastDStream(args->ip[0]); + args->bits[1] = HUF_initFastDStream(args->ip[1]); + args->bits[2] = HUF_initFastDStream(args->ip[2]); + args->bits[3] = HUF_initFastDStream(args->ip[3]); + + /* The decoders must be sure to never read beyond ilowest. + * This is lower than iend[0], but allowing decoders to read + * down to ilowest can allow an extra iteration or two in the + * fast loop. + */ + args->ilowest = istart; args->oend = oend; args->dt = dt; - return 0; + return 1; } -static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd) +static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd) { /* Validate that we haven't overwritten. */ if (args->op[stream] > segmentEnd) @@ -264,15 +294,32 @@ static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs /* Construct the BIT_DStream_t. */ assert(sizeof(size_t) == 8); - bit->bitContainer = MEM_readLE64(args->ip[stream]); + bit->bitContainer = MEM_readLEST(args->ip[stream]); bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]); - bit->start = (const char*)args->iend[0]; + bit->start = (const char*)args->ilowest; bit->limitPtr = bit->start + sizeof(size_t); bit->ptr = (const char*)args->ip[stream]; return 0; } -#endif + +/* Calls X(N) for each stream 0, 1, 2, 3. */ +#define HUF_4X_FOR_EACH_STREAM(X) \ + do { \ + X(0); \ + X(1); \ + X(2); \ + X(3); \ + } while (0) + +/* Calls X(N, var) for each stream 0, 1, 2, 3. 
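A hedged illustration of the sentinel-bit bookkeeping that HUF_initFastDStream sets up and HUF_initRemainingDStream relies on: because a set bit sits directly below the unconsumed payload, and consuming bits is a left shift, a trailing-zero count on the container always equals the number of bits consumed from the current 8-byte window. read64le and ctz64 are stand-ins assuming a little-endian host and GCC/Clang builtins:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static uint64_t read64le(const uint8_t* p) { uint64_t v; memcpy(&v, p, 8); return v; }
    static unsigned ctz64(uint64_t v) { assert(v != 0); return (unsigned)__builtin_ctzll(v); }

    /* Refill one backward-read stream, mirroring the reload step used by the
     * fast loops below: back up by the whole bytes consumed, then re-plant
     * the sentinel and re-apply the leftover bit offset. */
    static void reloadStream(const uint8_t** ip, uint64_t* bits)
    {
        unsigned const ctz     = ctz64(*bits);
        unsigned const nbBytes = ctz >> 3;   /* whole bytes consumed */
        unsigned const nbBits  = ctz & 7;    /* leftover bits */
        *ip -= nbBytes;                      /* streams are read backwards */
        *bits = (read64le(*ip) | 1) << nbBits;
    }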
*/ +#define HUF_4X_FOR_EACH_STREAM_WITH_VAR(X, var) \ + do { \ + X(0, (var)); \ + X(1, (var)); \ + X(2, (var)); \ + X(3, (var)); \ + } while (0) #ifndef HUF_FORCE_DECOMPRESS_X2 @@ -289,10 +336,11 @@ typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decodi static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) { U64 D4; if (MEM_isLittleEndian()) { - D4 = (symbol << 8) + nbBits; + D4 = (U64)((symbol << 8) + nbBits); } else { - D4 = symbol + (nbBits << 8); + D4 = (U64)(symbol + (nbBits << 8)); } + assert(D4 < (1U << 16)); D4 *= 0x0001000100010001ULL; return D4; } @@ -335,13 +383,7 @@ typedef struct { BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; } HUF_ReadDTableX1_Workspace; - -size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) -{ - return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - -size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags) { U32 tableLog = 0; U32 nbSymbols = 0; @@ -356,7 +398,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ - iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2); + iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags); if (HUF_isError(iSize)) return iSize; @@ -383,9 +425,8 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr * rankStart[0] is not filled because there are no entries in the table for * weight 0. */ - { - int n; - int nextRankStart = 0; + { int n; + U32 nextRankStart = 0; int const unroll = 4; int const nLimit = (int)nbSymbols - unroll + 1; for (n=0; n<(int)tableLog+1; n++) { @@ -412,10 +453,9 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr * We can switch based on the length to a different inner loop which is * optimized for that particular case. 
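The 0x0001000100010001ULL multiply in HUF_DEltX1_set4 above is a standard SWAR broadcast: it replicates one 16-bit entry into all four lanes of a 64-bit word so that four adjacent DTable cells can be filled with a single store. A self-contained check of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t const entry = 0x2A05;   /* e.g. symbol 0x2A, nbBits 5 */
        uint64_t const D4 = (uint64_t)entry * 0x0001000100010001ULL;
        assert(D4 == 0x2A052A052A052A05ULL);   /* four copies of the entry */
        return 0;
    }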
*/ - { - U32 w; - int symbol=wksp->rankVal[0]; - int rankStart=0; + { U32 w; + int symbol = wksp->rankVal[0]; + int rankStart = 0; for (w=1; w<tableLog+1; ++w) { int const symbolCount = wksp->rankVal[w]; int const length = (1 << w) >> 1; @@ -489,15 +529,19 @@ HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog } #define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \ - *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog) + do { *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog); } while (0) -#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \ - if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ - HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) +#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \ + do { \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \ + } while (0) -#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \ - if (MEM_64bits()) \ - HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) +#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \ + do { \ + if (MEM_64bits()) \ + HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \ + } while (0) HINT_INLINE size_t HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog) @@ -525,7 +569,7 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons while (p < pEnd) HUF_DECODE_SYMBOLX1_0(p, bitDPtr); - return pEnd-pStart; + return (size_t)(pEnd-pStart); } FORCE_INLINE_TEMPLATE size_t @@ -535,7 +579,7 @@ HUF_decompress1X1_usingDTable_internal_body( const HUF_DTable* DTable) { BYTE* op = (BYTE*)dst; - BYTE* const oend = op + dstSize; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(op, (ptrdiff_t)dstSize); const void* dtPtr = DTable + 1; const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; BIT_DStream_t bitD; @@ -551,6 +595,10 @@ HUF_decompress1X1_usingDTable_internal_body( return dstSize; } +/* HUF_decompress4X1_usingDTable_internal_body(): + * Conditions : + * @dstSize >= 6 + */ FORCE_INLINE_TEMPLATE size_t HUF_decompress4X1_usingDTable_internal_body( void* dst, size_t dstSize, @@ -559,6 +607,7 @@ HUF_decompress4X1_usingDTable_internal_body( { /* Check */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; @@ -594,6 +643,7 @@ HUF_decompress4X1_usingDTable_internal_body( if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + assert(dstSize >= 6); /* validated above */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); @@ -656,52 +706,173 @@ size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo } #endif -#if HUF_NEED_DEFAULT_FUNCTION static size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, size_t cSrcSize, HUF_DTable const* DTable) { return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } -#endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 -HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN; +HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; + +#endif -static HUF_ASM_X86_64_BMI2_ATTRS +static HUF_FAST_BMI2_ATTRS +void 
HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) +{ + U64 bits[4]; + BYTE const* ip[4]; + BYTE* op[4]; + U16 const* const dtable = (U16 const*)args->dt; + BYTE* const oend = args->oend; + BYTE const* const ilowest = args->ilowest; + + /* Copy the arguments to local variables */ + ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); + ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip)); + ZSTD_memcpy(&op, &args->op, sizeof(op)); + + assert(MEM_isLittleEndian()); + assert(!MEM_32bits()); + + for (;;) { + BYTE* olimit; + int stream; + + /* Assert loop preconditions */ +#ifndef NDEBUG + for (stream = 0; stream < 4; ++stream) { + assert(op[stream] <= (stream == 3 ? oend : op[stream + 1])); + assert(ip[stream] >= ilowest); + } +#endif + /* Compute olimit */ + { + /* Each iteration produces 5 output symbols per stream */ + size_t const oiters = (size_t)(oend - op[3]) / 5; + /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes + * per stream. + */ + size_t const iiters = (size_t)(ip[0] - ilowest) / 7; + /* We can safely run iters iterations before running bounds checks */ + size_t const iters = MIN(oiters, iiters); + size_t const symbols = iters * 5; + + /* We can simply check that op[3] < olimit, instead of checking all + * of our bounds, since we can't hit the other bounds until we've run + * iters iterations, which only happens when op[3] == olimit. + */ + olimit = op[3] + symbols; + + /* Exit the fast decoding loop once we reach the end. */ + if (op[3] == olimit) + break; + + /* Exit the decoding loop if any input pointer has crossed the + * previous one. This indicates corruption, and a precondition + * to our loop is that ip[i] >= ip[0]. + */ + for (stream = 1; stream < 4; ++stream) { + if (ip[stream] < ip[stream - 1]) + goto _out; + } + } + +#ifndef NDEBUG + for (stream = 1; stream < 4; ++stream) { + assert(ip[stream] >= ip[stream - 1]); + } +#endif + +#define HUF_4X1_DECODE_SYMBOL(_stream, _symbol) \ + do { \ + U64 const index = bits[(_stream)] >> 53; \ + U16 const entry = dtable[index]; \ + bits[(_stream)] <<= entry & 0x3F; \ + op[(_stream)][(_symbol)] = (BYTE)(entry >> 8); \ + } while (0) + +#define HUF_5X1_RELOAD_STREAM(_stream) \ + do { \ + U64 const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \ + U64 const nbBits = ctz & 7; \ + U64 const nbBytes = ctz >> 3; \ + op[(_stream)] += 5; \ + ip[(_stream)] -= nbBytes; \ + bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \ + bits[(_stream)] <<= nbBits; \ + } while (0) + + /* Manually unroll the loop because compilers don't consistently + * unroll the inner loops, which destroys performance. + */ + do { + /* Decode 5 symbols in each of the 4 streams */ + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 0); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 1); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 2); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 3); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 4); + + /* Reload each of the 4 bitstreams */ + HUF_4X_FOR_EACH_STREAM(HUF_5X1_RELOAD_STREAM); + } while (op[3] < olimit); + +#undef HUF_4X1_DECODE_SYMBOL +#undef HUF_5X1_RELOAD_STREAM + } + +_out: + + /* Save the final values of each of the state variables back to args. 
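Restating the HUF_4X1_DECODE_SYMBOL step above as a plain function may make the entry layout easier to see. This is a hedged sketch, relying on the diff's stated constraint that the table log is HUF_DECODER_FAST_TABLELOG (11, hence the shift by 53):

    #include <stdint.h>

    /* One single-symbol (X1) decode step: the top 11 bits of the container
     * index the table; the entry's low 6 bits hold the number of bits the
     * code consumes and its high 8 bits hold the decoded byte. */
    static uint8_t decodeOneX1(uint64_t* bits, const uint16_t* dtable)
    {
        uint64_t const index = *bits >> 53;   /* 64 - 11 */
        uint16_t const entry = dtable[index];
        *bits <<= (entry & 0x3F);             /* consume the code's bits */
        return (uint8_t)(entry >> 8);         /* emit the symbol */
    }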
*/ + ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); + ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip)); + ZSTD_memcpy(&args->op, &op, sizeof(op)); +} + +/** + * @returns @p dstSize on success (>= 6) + * 0 if the fallback implementation should be used + * An error if an error occurred + */ +static HUF_FAST_BMI2_ATTRS size_t -HUF_decompress4X1_usingDTable_internal_bmi2_asm( +HUF_decompress4X1_usingDTable_internal_fast( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) + const HUF_DTable* DTable, + HUF_DecompressFastLoopFn loopFn) { void const* dt = DTable + 1; - const BYTE* const iend = (const BYTE*)cSrc + 6; - BYTE* const oend = (BYTE*)dst + dstSize; - HUF_DecompressAsmArgs args; - { - size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); - FORWARD_IF_ERROR(ret, "Failed to init asm args"); - if (ret != 0) - return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + BYTE const* const ilowest = (BYTE const*)cSrc; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize); + HUF_DecompressFastArgs args; + { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init fast loop args"); + if (ret == 0) + return 0; } - assert(args.ip[0] >= args.ilimit); - HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args); + assert(args.ip[0] >= args.ilowest); + loopFn(&args); - /* Our loop guarantees that ip[] >= ilimit and that we haven't + /* Our loop guarantees that ip[] >= ilowest and that we haven't * overwritten any op[]. */ - assert(args.ip[0] >= iend); - assert(args.ip[1] >= iend); - assert(args.ip[2] >= iend); - assert(args.ip[3] >= iend); + assert(args.ip[0] >= ilowest); + assert(args.ip[0] >= ilowest); + assert(args.ip[1] >= ilowest); + assert(args.ip[2] >= ilowest); + assert(args.ip[3] >= ilowest); assert(args.op[3] <= oend); - (void)iend; + + assert(ilowest == args.ilowest); + assert(ilowest + 6 == args.iend[0]); + (void)ilowest; /* finish bit streams one by one. 
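The segment arithmetic used just below is simple but worth spelling out: the destination splits into four segments of ceil(dstSize/4) bytes, with stream 3 taking the possibly-shorter remainder. A small worked check of that split:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t const dstSize = 10;
        size_t const segmentSize = (dstSize + 3) / 4;   /* ceil(10/4) == 3 */
        assert(segmentSize == 3);
        /* Streams 0..2 decode segmentSize bytes each; stream 3 decodes the
         * remaining dstSize - 3*segmentSize bytes (1 here). */
        assert(dstSize - 3 * segmentSize == 1);
        return 0;
    }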
*/ - { - size_t const segmentSize = (dstSize+3) / 4; + { size_t const segmentSize = (dstSize+3) / 4; BYTE* segmentEnd = (BYTE*)dst; int i; for (i = 0; i < 4; ++i) { @@ -718,97 +889,59 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm( } /* decoded size */ + assert(dstSize != 0); return dstSize; } -#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */ - -typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, - const void *cSrc, - size_t cSrcSize, - const HUF_DTable *DTable); HUF_DGEN(HUF_decompress1X1_usingDTable_internal) static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) + size_t cSrcSize, HUF_DTable const* DTable, int flags) { + HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default; + HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop; + #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { + fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2; # if ZSTD_ENABLE_ASM_X86_64_BMI2 - return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -# else - return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; + } # endif + } else { + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -#else - (void)bmi2; #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) - return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -#else - return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; + } #endif -} - -size_t HUF_decompress1X1_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -} - -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - const BYTE* ip = (const BYTE*) cSrc; - - size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; - - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); -} - - -size_t HUF_decompress4X1_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) { + size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - 
void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); -} - -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); + return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } - #endif /* HUF_FORCE_DECOMPRESS_X2 */ @@ -991,7 +1124,7 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, - const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32* rankStart, rankValCol_t* rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32* const rankVal = rankValOrigin[0]; @@ -1046,14 +1179,7 @@ typedef struct { size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - -size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, - const void* src, size_t srcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { U32 tableLog, maxW, nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); @@ -1075,7 +1201,7 @@ size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
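One pattern worth calling out across this diff: the old int bmi2 argument becomes an int flags bitfield, threaded through HUF_readStats_wksp and every decoder, so a single parameter can carry several independent toggles. A hedged sketch of the idea; the names mirror the diff, but the numeric values here are illustrative and the real definitions live in lib/common/huf.h:

    /* Illustrative values only; see lib/common/huf.h for the real enum. */
    typedef enum {
        HUF_flags_bmi2        = 1 << 0,  /* BMI2 is available at runtime */
        HUF_flags_disableAsm  = 1 << 1,  /* skip the assembly fast loop */
        HUF_flags_disableFast = 1 << 2   /* skip the fast loops entirely */
    } HUF_flags_sketch_e;

    /* A caller combines toggles instead of passing separate ints, e.g.
     * flags = HUF_flags_bmi2 | HUF_flags_disableAsm; each layer then
     * forwards the same int unchanged. */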
*/ - iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2); + iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags); if (HUF_isError(iSize)) return iSize; /* check result */ @@ -1165,15 +1291,19 @@ HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, c } #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) + do { ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); } while (0) -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ - if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ + do { \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \ + } while (0) -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ - if (MEM_64bits()) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ + do { \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \ + } while (0) HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, @@ -1233,7 +1363,7 @@ HUF_decompress1X2_usingDTable_internal_body( /* decode */ { BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ostart + dstSize; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)dstSize); const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; DTableDesc const dtd = HUF_getDTableDesc(DTable); @@ -1246,6 +1376,11 @@ HUF_decompress1X2_usingDTable_internal_body( /* decoded size */ return dstSize; } + +/* HUF_decompress4X2_usingDTable_internal_body(): + * Conditions: + * @dstSize >= 6 + */ FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, @@ -1253,6 +1388,7 @@ HUF_decompress4X2_usingDTable_internal_body( const HUF_DTable* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; @@ -1286,8 +1422,9 @@ HUF_decompress4X2_usingDTable_internal_body( DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; - if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ - if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + assert(dstSize >= 6 /* validated above */); CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); @@ -1372,44 +1509,199 @@ size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo } #endif -#if HUF_NEED_DEFAULT_FUNCTION static size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, size_t cSrcSize, HUF_DTable const* DTable) { return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } -#endif 
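Both fast loops bound their check-free run the same way, so the arithmetic is worth isolating: each unrolled iteration emits 5 symbols per stream and consumes at most 55 bits (under 7 bytes) per stream, so the loop may run min(output room per iteration, input room / 7) iterations before any bounds checks. A hedged sketch for the X1 case; the X2 loop below divides output room by 10 instead, because each of its entries can emit 2 bytes:

    #include <stddef.h>

    /* Check-free iteration budget for the X1 fast loop: 5 one-byte symbols
     * out and at most 7 bytes in, per stream, per iteration. */
    static size_t safeIterations(size_t outBytesLeft, size_t inBytesLeft)
    {
        size_t const oiters = outBytesLeft / 5;
        size_t const iiters = inBytesLeft / 7;
        return oiters < iiters ? oiters : iiters;
    }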
#if ZSTD_ENABLE_ASM_X86_64_BMI2 -HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN; +HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; + +#endif + +static HUF_FAST_BMI2_ATTRS +void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) +{ + U64 bits[4]; + BYTE const* ip[4]; + BYTE* op[4]; + BYTE* oend[4]; + HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt; + BYTE const* const ilowest = args->ilowest; + + /* Copy the arguments to local registers. */ + ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); + ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip)); + ZSTD_memcpy(&op, &args->op, sizeof(op)); + + oend[0] = op[1]; + oend[1] = op[2]; + oend[2] = op[3]; + oend[3] = args->oend; + + assert(MEM_isLittleEndian()); + assert(!MEM_32bits()); + + for (;;) { + BYTE* olimit; + int stream; + + /* Assert loop preconditions */ +#ifndef NDEBUG + for (stream = 0; stream < 4; ++stream) { + assert(op[stream] <= oend[stream]); + assert(ip[stream] >= ilowest); + } +#endif + /* Compute olimit */ + { + /* Each loop does 5 table lookups for each of the 4 streams. + * Each table lookup consumes up to 11 bits of input, and produces + * up to 2 bytes of output. + */ + /* We can consume up to 7 bytes of input per iteration per stream. + * We also know that each input pointer is >= ip[0]. So we can run + * iters loops before running out of input. + */ + size_t iters = (size_t)(ip[0] - ilowest) / 7; + /* Each iteration can produce up to 10 bytes of output per stream. + * Each output stream may advance at different rates. So take the + * minimum number of safe iterations among all the output streams. + */ + for (stream = 0; stream < 4; ++stream) { + size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10; + iters = MIN(iters, oiters); + } + + /* Each iteration produces at least 5 output symbols. So until + * op[3] crosses olimit, we know we haven't executed iters + * iterations yet. This saves us maintaining an iters counter, + * at the expense of computing the remaining # of iterations + * more frequently. + */ + olimit = op[3] + (iters * 5); + + /* Exit the fast decoding loop once we reach the end. */ + if (op[3] == olimit) + break; -static HUF_ASM_X86_64_BMI2_ATTRS size_t -HUF_decompress4X2_usingDTable_internal_bmi2_asm( + /* Exit the decoding loop if any input pointer has crossed the + * previous one. This indicates corruption, and a precondition + * to our loop is that ip[i] >= ip[0]. 
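The crossing test referenced by this comment is the loop's main corruption defense and reads the same in both fast loops. As a compact restatement: the four input pointers walk backwards through adjacent regions, so a later stream's pointer dropping below an earlier one proves the jump-table lengths were inconsistent:

    #include <stdint.h>

    /* Nonzero if any stream's input pointer has crossed the previous one,
     * i.e. the encoded stream lengths were bogus. */
    static int inputStreamsCrossed(const uint8_t* const ip[4])
    {
        int stream;
        for (stream = 1; stream < 4; ++stream)
            if (ip[stream] < ip[stream - 1]) return 1;
        return 0;
    }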
+ */ + for (stream = 1; stream < 4; ++stream) { + if (ip[stream] < ip[stream - 1]) + goto _out; + } + } + +#ifndef NDEBUG + for (stream = 1; stream < 4; ++stream) { + assert(ip[stream] >= ip[stream - 1]); + } +#endif + +#define HUF_4X2_DECODE_SYMBOL(_stream, _decode3) \ + do { \ + if ((_decode3) || (_stream) != 3) { \ + U64 const index = bits[(_stream)] >> 53; \ + size_t const entry = MEM_readLE32(&dtable[index]); \ + MEM_write16(op[(_stream)], (U16)entry); \ + bits[(_stream)] <<= (entry >> 16) & 0x3F; \ + op[(_stream)] += entry >> 24; \ + } \ + } while (0) + +#define HUF_5X2_RELOAD_STREAM(_stream, _decode3) \ + do { \ + if (_decode3) HUF_4X2_DECODE_SYMBOL(3, 1); \ + { \ + U64 const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \ + U64 const nbBits = ctz & 7; \ + U64 const nbBytes = ctz >> 3; \ + ip[(_stream)] -= nbBytes; \ + bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \ + bits[(_stream)] <<= nbBits; \ + } \ + } while (0) + +#if defined(__aarch64__) +# define HUF_4X2_4WAY 1 +#else +# define HUF_4X2_4WAY 0 +#endif +#define HUF_4X2_3WAY !HUF_4X2_4WAY + + /* Manually unroll the loop because compilers don't consistently + * unroll the inner loops, which destroys performance. + */ + do { + /* Decode 5 symbols from each of the first 3 or 4 streams. + * In the 3-way case the final stream will be decoded during + * the reload phase to reduce register pressure. + */ + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, HUF_4X2_4WAY); + + /* In the 3-way case decode one symbol from the final stream. */ + HUF_4X2_DECODE_SYMBOL(3, HUF_4X2_3WAY); + + /* In the 3-way case decode 4 symbols from the final stream & + * reload bitstreams. The final stream is reloaded last, meaning + * that all 5 symbols are decoded from the final stream before + * it is reloaded. + */ + HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_5X2_RELOAD_STREAM, HUF_4X2_3WAY); + } while (op[3] < olimit); + } + +#undef HUF_4X2_DECODE_SYMBOL +#undef HUF_5X2_RELOAD_STREAM + +_out: + + /* Save the final values of each of the state variables back to args. 
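To make the HUF_4X2_DECODE_SYMBOL entry layout above explicit: each 32-bit table entry packs up to two output bytes in its low 16 bits, the consumed bit count in bits 16..21, and the number of bytes produced (1 or 2) in its top byte. A hedged sketch, assuming the little-endian host the fast loop already requires:

    #include <stdint.h>
    #include <string.h>

    /* One double-symbol (X2) decode step. The 2-byte store is unconditional;
     * when the entry emits a single byte, the extra byte is overwritten by
     * the next step because op advances by only 1. */
    static uint8_t* decodeOneX2(uint64_t* bits, const uint32_t* dtable, uint8_t* op)
    {
        uint32_t const entry = dtable[*bits >> 53]; /* top 11 bits index the table */
        memcpy(op, &entry, 2);                      /* low 16 bits: symbol bytes */
        *bits <<= (entry >> 16) & 0x3F;             /* bits 16..21: nbBits */
        return op + (entry >> 24);                  /* top byte: bytes produced */
    }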
*/ + ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); + ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip)); + ZSTD_memcpy(&args->op, &op, sizeof(op)); +} + + +static HUF_FAST_BMI2_ATTRS size_t +HUF_decompress4X2_usingDTable_internal_fast( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) { + const HUF_DTable* DTable, + HUF_DecompressFastLoopFn loopFn) { void const* dt = DTable + 1; - const BYTE* const iend = (const BYTE*)cSrc + 6; - BYTE* const oend = (BYTE*)dst + dstSize; - HUF_DecompressAsmArgs args; + const BYTE* const ilowest = (const BYTE*)cSrc; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)dstSize); + HUF_DecompressFastArgs args; { - size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); FORWARD_IF_ERROR(ret, "Failed to init asm args"); - if (ret != 0) - return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (ret == 0) + return 0; } - assert(args.ip[0] >= args.ilimit); - HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args); + assert(args.ip[0] >= args.ilowest); + loopFn(&args); /* note : op4 already verified within main loop */ - assert(args.ip[0] >= iend); - assert(args.ip[1] >= iend); - assert(args.ip[2] >= iend); - assert(args.ip[3] >= iend); + assert(args.ip[0] >= ilowest); + assert(args.ip[1] >= ilowest); + assert(args.ip[2] >= ilowest); + assert(args.ip[3] >= ilowest); assert(args.op[3] <= oend); - (void)iend; + + assert(ilowest == args.ilowest); + assert(ilowest + 6 == args.iend[0]); + (void)ilowest; /* finish bitStreams one by one */ { @@ -1432,91 +1724,72 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm( /* decoded size */ return dstSize; } -#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */ static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) + size_t cSrcSize, HUF_DTable const* DTable, int flags) { + HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default; + HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop; + #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { + fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2; # if ZSTD_ENABLE_ASM_X86_64_BMI2 - return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -# else - return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; + } # endif + } else { + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -#else - (void)bmi2; #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) - return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -#else - return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; + } #endif + + if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) { + size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } HUF_DGEN(HUF_decompress1X2_usingDTable_internal) -size_t HUF_decompress1X2_usingDTable( 
- void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -} - size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, - workSpace, wkspSize); + workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); -} - - -size_t HUF_decompress4X2_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags); } -static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, - workSpace, wkspSize); + workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - - #endif /* HUF_FORCE_DECOMPRESS_X1 */ @@ -1524,44 +1797,6 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, /* Universal decompression selectors */ /* ***********************************/ -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif -} - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif -} - #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; @@ -1616,36 +1851,9 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) #endif } - -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, - size_t dstSize, const void* cSrc, - size_t cSrcSize, void* workSpace, - size_t wkspSize) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize == 0) return ERROR(corruption_detected); - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#else - return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize): - HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#endif - } -} - size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); @@ -1658,71 +1866,71 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, (void)algoNb; assert(algoNb == 0); return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #else return algoNb ? 
HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize): + cSrcSize, workSpace, wkspSize, flags): HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #endif } } -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #else - return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : + HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #endif } #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #else - return dtd.tableType ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : + HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #endif } -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); @@ -1732,160 +1940,14 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); -#else - return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) : - HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); -#endif - } -} - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX1_wksp(DTable, src, srcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX2_wksp(DTable, src, srcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return 
HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); - -size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ -#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) - static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 }; -#endif - - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize); + return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); - return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); + return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #else - return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); + return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : + HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #endif } } - -size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#else - return algoNb ? 
HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : - HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; -#endif - } -} - -size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} -#endif diff --git a/lib/decompress/huf_decompress_amd64.S b/lib/decompress/huf_decompress_amd64.S index 3f0e5c26c7d..dc1f3d92113 100644 --- a/lib/decompress/huf_decompress_amd64.S +++ b/lib/decompress/huf_decompress_amd64.S @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -10,34 +10,63 @@ #include "../common/portability_macros.h" +#if defined(__ELF__) && defined(__GNUC__) /* Stack marking * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart */ -#if defined(__ELF__) && defined(__GNUC__) .section .note.GNU-stack,"",%progbits + +#if defined(__aarch64__) +/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64. + * See: https://github.com/facebook/zstd/issues/3841 + * See: https://gcc.godbolt.org/z/sqr5T4ffK + * See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/ + * See: https://reviews.llvm.org/D62609 + */ +.pushsection .note.gnu.property, "a" +.p2align 3 +.long 4 /* size of the name - "GNU\0" */ +.long 0x10 /* size of descriptor */ +.long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */ +.asciz "GNU" +.long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */ +.long 4 /* pr_datasz - 4 bytes */ +.long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */ +.p2align 3 /* pr_padding - bring everything to 8 byte alignment */ +.popsection +#endif + +#endif + +// There appears to be an unreconcilable syntax difference between Linux and Darwin assemblers. +// Name of a private label (i.e. not exported to symbol table) on Darwin has to start with "L", +// on Linux has to start with ".". There's no way to have a name start with both "." and "L", so +// we have to use a macro. +#if defined(__APPLE__) +#define LOCAL_LABEL(label) L_ ## label +#else +#define LOCAL_LABEL(label) .L_ ## label #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 /* Calling convention: * - * %rdi contains the first argument: HUF_DecompressAsmArgs*. + * %rdi (or %rcx on Windows) contains the first argument: HUF_DecompressAsmArgs*. * %rbp isn't maintained (no frame pointer). * %rsp contains the stack pointer that grows down. * No red-zone is assumed, only addresses >= %rsp are used. * All register contents are preserved. - * - * TODO: Support Windows calling convention. 
*/ -ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) -.global HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop -.global HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop -.global _HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop -.global _HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop) +.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop +.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop +.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop +.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop .text /* Sets up register mappings for clarity. @@ -95,27 +124,65 @@ ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) /* Define both _HUF_* & HUF_* symbols because MacOS * C symbols are prefixed with '_' & Linux symbols aren't. */ -_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: -HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: +_HUF_decompress4X1_usingDTable_internal_fast_asm_loop: +HUF_decompress4X1_usingDTable_internal_fast_asm_loop: + ZSTD_CET_ENDBRANCH + .cfi_startproc + .cfi_def_cfa_offset 8 + .cfi_offset %rip, -8 /* Save all registers - even if they are callee saved for simplicity. */ push %rax + .cfi_def_cfa_offset 16 + .cfi_offset rax, -16 push %rbx + .cfi_def_cfa_offset 24 + .cfi_offset rbx, -24 push %rcx + .cfi_def_cfa_offset 32 + .cfi_offset rcx, -32 push %rdx + .cfi_def_cfa_offset 40 + .cfi_offset rdx, -40 push %rbp + .cfi_def_cfa_offset 48 + .cfi_offset rbp, -48 push %rsi + .cfi_def_cfa_offset 56 + .cfi_offset rsi, -56 push %rdi + .cfi_def_cfa_offset 64 + .cfi_offset rdi, -64 push %r8 + .cfi_def_cfa_offset 72 + .cfi_offset r8, -72 push %r9 + .cfi_def_cfa_offset 80 + .cfi_offset r9, -80 push %r10 + .cfi_def_cfa_offset 88 + .cfi_offset r10, -88 push %r11 + .cfi_def_cfa_offset 96 + .cfi_offset r11, -96 push %r12 + .cfi_def_cfa_offset 104 + .cfi_offset r12, -104 push %r13 + .cfi_def_cfa_offset 112 + .cfi_offset r13, -112 push %r14 + .cfi_def_cfa_offset 120 + .cfi_offset r14, -120 push %r15 + .cfi_def_cfa_offset 128 + .cfi_offset r15, -128 /* Read HUF_DecompressAsmArgs* args from %rax */ +#if defined(_WIN32) + movq %rcx, %rax +#else movq %rdi, %rax +#endif movq 0(%rax), %ip0 movq 8(%rax), %ip1 movq 16(%rax), %ip2 @@ -130,13 +197,18 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: movq 88(%rax), %bits3 movq 96(%rax), %dtable push %rax /* argument */ - push 104(%rax) /* ilimit */ + .cfi_def_cfa_offset 136 + push 104(%rax) /* ilowest */ + .cfi_def_cfa_offset 144 push 112(%rax) /* oend */ + .cfi_def_cfa_offset 152 push %olimit /* olimit space */ + .cfi_def_cfa_offset 160 subq $24, %rsp + .cfi_def_cfa_offset 184 -.L_4X1_compute_olimit: +LOCAL_LABEL(4X1_compute_olimit): /* Computes how many iterations we can do safely * %r15, %rax may be clobbered * rbx, rdx must be saved @@ -155,11 +227,11 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: shrq $2, %r15 movq %ip0, %rax /* rax = ip0 */ - movq 40(%rsp), %rdx /* rdx = ilimit 
*/ - subq %rdx, %rax /* rax = ip0 - ilimit */ - movq %rax, %rbx /* rbx = ip0 - ilimit */ + movq 40(%rsp), %rdx /* rdx = ilowest */ + subq %rdx, %rax /* rax = ip0 - ilowest */ + movq %rax, %rbx /* rbx = ip0 - ilowest */ - /* rdx = (ip0 - ilimit) / 7 */ + /* rdx = (ip0 - ilowest) / 7 */ movabsq $2635249153387078803, %rdx mulq %rdx subq %rdx, %rbx @@ -182,21 +254,20 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: /* If (op3 + 20 > olimit) */ movq %op3, %rax /* rax = op3 */ - addq $20, %rax /* rax = op3 + 20 */ - cmpq %rax, %olimit /* op3 + 20 > olimit */ - jb .L_4X1_exit + cmpq %rax, %olimit /* op3 == olimit */ + je LOCAL_LABEL(4X1_exit) /* If (ip1 < ip0) go to exit */ cmpq %ip0, %ip1 - jb .L_4X1_exit + jb LOCAL_LABEL(4X1_exit) /* If (ip2 < ip1) go to exit */ cmpq %ip1, %ip2 - jb .L_4X1_exit + jb LOCAL_LABEL(4X1_exit) /* If (ip3 < ip2) go to exit */ cmpq %ip2, %ip3 - jb .L_4X1_exit + jb LOCAL_LABEL(4X1_exit) /* Reads top 11 bits from bits[n] * Loads dt[bits[n]] into var[n] @@ -257,7 +328,7 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: .p2align 6 -.L_4X1_loop_body: +LOCAL_LABEL(4X1_loop_body): /* Decode 5 symbols in each of the 4 streams (20 total) * Must have called GET_NEXT_DELT for each stream */ @@ -295,7 +366,7 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: /* If op3 < olimit: continue the loop */ cmp %op3, 24(%rsp) - ja .L_4X1_loop_body + ja LOCAL_LABEL(4X1_loop_body) /* Reload ip[1,2,3] from stack */ movq 0(%rsp), %ip1 @@ -303,20 +374,25 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: movq 16(%rsp), %ip3 /* Re-compute olimit */ - jmp .L_4X1_compute_olimit + jmp LOCAL_LABEL(4X1_compute_olimit) #undef GET_NEXT_DELT #undef DECODE_FROM_DELT #undef DECODE #undef RELOAD_BITS -.L_4X1_exit: +LOCAL_LABEL(4X1_exit): addq $24, %rsp + .cfi_def_cfa_offset 160 /* Restore stack (oend & olimit) */ pop %rax /* olimit */ + .cfi_def_cfa_offset 152 pop %rax /* oend */ - pop %rax /* ilimit */ + .cfi_def_cfa_offset 144 + pop %rax /* ilowest */ + .cfi_def_cfa_offset 136 pop %rax /* arg */ + .cfi_def_cfa_offset 128 /* Save ip / op / bits */ movq %ip0, 0(%rax) @@ -334,42 +410,112 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: /* Restore registers */ pop %r15 + .cfi_restore r15 + .cfi_def_cfa_offset 120 pop %r14 + .cfi_restore r14 + .cfi_def_cfa_offset 112 pop %r13 + .cfi_restore r13 + .cfi_def_cfa_offset 104 pop %r12 + .cfi_restore r12 + .cfi_def_cfa_offset 96 pop %r11 + .cfi_restore r11 + .cfi_def_cfa_offset 88 pop %r10 + .cfi_restore r10 + .cfi_def_cfa_offset 80 pop %r9 + .cfi_restore r9 + .cfi_def_cfa_offset 72 pop %r8 + .cfi_restore r8 + .cfi_def_cfa_offset 64 pop %rdi + .cfi_restore rdi + .cfi_def_cfa_offset 56 pop %rsi + .cfi_restore rsi + .cfi_def_cfa_offset 48 pop %rbp + .cfi_restore rbp + .cfi_def_cfa_offset 40 pop %rdx + .cfi_restore rdx + .cfi_def_cfa_offset 32 pop %rcx + .cfi_restore rcx + .cfi_def_cfa_offset 24 pop %rbx + .cfi_restore rbx + .cfi_def_cfa_offset 16 pop %rax + .cfi_restore rax + .cfi_def_cfa_offset 8 ret - -_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: -HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: + .cfi_endproc + +_HUF_decompress4X2_usingDTable_internal_fast_asm_loop: +HUF_decompress4X2_usingDTable_internal_fast_asm_loop: + ZSTD_CET_ENDBRANCH + .cfi_startproc + .cfi_def_cfa_offset 8 + .cfi_offset %rip, -8 /* Save all registers - even if they are callee saved for simplicity. 
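+ *
+ * (Note on the .cfi_ directives used throughout these prologues and
+ *  epilogues: hand-written assembly gets no unwind tables from a compiler,
+ *  so every push is annotated for DWARF unwinders, e.g.
+ *
+ *      push %rbx
+ *      .cfi_def_cfa_offset 24    the CFA is now 24 bytes above %rsp
+ *      .cfi_offset rbx, -24      the saved %rbx lives at CFA - 24
+ *
+ *  These directives emit debug metadata only; they change no executed code.)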
*/ push %rax + .cfi_def_cfa_offset 16 + .cfi_offset rax, -16 push %rbx + .cfi_def_cfa_offset 24 + .cfi_offset rbx, -24 push %rcx + .cfi_def_cfa_offset 32 + .cfi_offset rcx, -32 push %rdx + .cfi_def_cfa_offset 40 + .cfi_offset rdx, -40 push %rbp + .cfi_def_cfa_offset 48 + .cfi_offset rbp, -48 push %rsi + .cfi_def_cfa_offset 56 + .cfi_offset rsi, -56 push %rdi + .cfi_def_cfa_offset 64 + .cfi_offset rdi, -64 push %r8 + .cfi_def_cfa_offset 72 + .cfi_offset r8, -72 push %r9 + .cfi_def_cfa_offset 80 + .cfi_offset r9, -80 push %r10 + .cfi_def_cfa_offset 88 + .cfi_offset r10, -88 push %r11 + .cfi_def_cfa_offset 96 + .cfi_offset r11, -96 push %r12 + .cfi_def_cfa_offset 104 + .cfi_offset r12, -104 push %r13 + .cfi_def_cfa_offset 112 + .cfi_offset r13, -112 push %r14 + .cfi_def_cfa_offset 120 + .cfi_offset r14, -120 push %r15 + .cfi_def_cfa_offset 128 + .cfi_offset r15, -128 + /* Read HUF_DecompressAsmArgs* args from %rax */ +#if defined(_WIN32) + movq %rcx, %rax +#else movq %rdi, %rax +#endif movq 0(%rax), %ip0 movq 8(%rax), %ip1 movq 16(%rax), %ip2 @@ -384,25 +530,33 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: movq 88(%rax), %bits3 movq 96(%rax), %dtable push %rax /* argument */ + .cfi_def_cfa_offset 136 push %rax /* olimit */ - push 104(%rax) /* ilimit */ + .cfi_def_cfa_offset 144 + push 104(%rax) /* ilowest */ + .cfi_def_cfa_offset 152 movq 112(%rax), %rax push %rax /* oend3 */ + .cfi_def_cfa_offset 160 movq %op3, %rax push %rax /* oend2 */ + .cfi_def_cfa_offset 168 movq %op2, %rax push %rax /* oend1 */ + .cfi_def_cfa_offset 176 movq %op1, %rax push %rax /* oend0 */ + .cfi_def_cfa_offset 184 /* Scratch space */ subq $8, %rsp + .cfi_def_cfa_offset 192 -.L_4X2_compute_olimit: +LOCAL_LABEL(4X2_compute_olimit): /* Computes how many iterations we can do safely * %r15, %rax may be clobbered * rdx must be saved @@ -412,9 +566,9 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: /* We can consume up to 7 input bytes each iteration. 
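+ *
+ * (The movabsq/mulq sequence below, also used in the 4X1 loop above, divides
+ *  by 7 without a div instruction: 2635249153387078803 is (2^64 + 5) / 7, a
+ *  fixed-point reciprocal. A sketch of the same computation in C, assuming a
+ *  compiler with unsigned __int128 support:
+ *
+ *      uint64_t div7(uint64_t n) {
+ *          const uint64_t M = 2635249153387078803ULL;
+ *          uint64_t const hi = (uint64_t)(((unsigned __int128)n * M) >> 64);
+ *          return (hi + ((n - hi) >> 1)) >> 2;
+ *      }
+ *
+ *  mulq leaves the high half of the product in %rdx; the sub/shr/add/shr
+ *  sequence that follows is the standard exact-quotient fixup.)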
*/ movq %ip0, %rax /* rax = ip0 */ - movq 40(%rsp), %rdx /* rdx = ilimit */ - subq %rdx, %rax /* rax = ip0 - ilimit */ - movq %rax, %r15 /* r15 = ip0 - ilimit */ + movq 40(%rsp), %rdx /* rdx = ilowest */ + subq %rdx, %rax /* rax = ip0 - ilowest */ + movq %rax, %r15 /* r15 = ip0 - ilowest */ /* rdx = rax / 7 */ movabsq $2635249153387078803, %rdx @@ -424,7 +578,7 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: addq %r15, %rdx shrq $2, %rdx - /* r15 = (ip0 - ilimit) / 7 */ + /* r15 = (ip0 - ilowest) / 7 */ movq %rdx, %r15 /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */ @@ -465,21 +619,20 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: /* If (op3 + 10 > olimit) */ movq %op3, %rax /* rax = op3 */ - addq $10, %rax /* rax = op3 + 10 */ - cmpq %rax, %olimit /* op3 + 10 > olimit */ - jb .L_4X2_exit + cmpq %rax, %olimit /* op3 == olimit */ + je LOCAL_LABEL(4X2_exit) /* If (ip1 < ip0) go to exit */ cmpq %ip0, %ip1 - jb .L_4X2_exit + jb LOCAL_LABEL(4X2_exit) /* If (ip2 < ip1) go to exit */ cmpq %ip1, %ip2 - jb .L_4X2_exit + jb LOCAL_LABEL(4X2_exit) /* If (ip3 < ip2) go to exit */ cmpq %ip2, %ip3 - jb .L_4X2_exit + jb LOCAL_LABEL(4X2_exit) #define DECODE(n, idx) \ movq %bits##n, %rax; \ @@ -506,7 +659,7 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: .p2align 6 -.L_4X2_loop_body: +LOCAL_LABEL(4X2_loop_body): /* We clobber r8, so store it on the stack */ movq %r8, 0(%rsp) @@ -523,21 +676,29 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: FOR_EACH_STREAM(RELOAD_BITS) cmp %op3, 48(%rsp) - ja .L_4X2_loop_body - jmp .L_4X2_compute_olimit + ja LOCAL_LABEL(4X2_loop_body) + jmp LOCAL_LABEL(4X2_compute_olimit) #undef DECODE #undef RELOAD_BITS -.L_4X2_exit: +LOCAL_LABEL(4X2_exit): addq $8, %rsp + .cfi_def_cfa_offset 184 /* Restore stack (oend & olimit) */ pop %rax /* oend0 */ + .cfi_def_cfa_offset 176 pop %rax /* oend1 */ + .cfi_def_cfa_offset 168 pop %rax /* oend2 */ + .cfi_def_cfa_offset 160 pop %rax /* oend3 */ - pop %rax /* ilimit */ + .cfi_def_cfa_offset 152 + pop %rax /* ilowest */ + .cfi_def_cfa_offset 144 pop %rax /* olimit */ + .cfi_def_cfa_offset 136 pop %rax /* arg */ + .cfi_def_cfa_offset 128 /* Save ip / op / bits */ movq %ip0, 0(%rax) @@ -555,20 +716,51 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: /* Restore registers */ pop %r15 + .cfi_restore r15 + .cfi_def_cfa_offset 120 pop %r14 + .cfi_restore r14 + .cfi_def_cfa_offset 112 pop %r13 + .cfi_restore r13 + .cfi_def_cfa_offset 104 pop %r12 + .cfi_restore r12 + .cfi_def_cfa_offset 96 pop %r11 + .cfi_restore r11 + .cfi_def_cfa_offset 88 pop %r10 + .cfi_restore r10 + .cfi_def_cfa_offset 80 pop %r9 + .cfi_restore r9 + .cfi_def_cfa_offset 72 pop %r8 + .cfi_restore r8 + .cfi_def_cfa_offset 64 pop %rdi + .cfi_restore rdi + .cfi_def_cfa_offset 56 pop %rsi + .cfi_restore rsi + .cfi_def_cfa_offset 48 pop %rbp + .cfi_restore rbp + .cfi_def_cfa_offset 40 pop %rdx + .cfi_restore rdx + .cfi_def_cfa_offset 32 pop %rcx + .cfi_restore rcx + .cfi_def_cfa_offset 24 pop %rbx + .cfi_restore rbx + .cfi_def_cfa_offset 16 pop %rax + .cfi_restore rax + .cfi_def_cfa_offset 8 ret + .cfi_endproc #endif diff --git a/lib/decompress/zstd_ddict.c b/lib/decompress/zstd_ddict.c index 889764a5e87..309ec0d0364 100644 --- a/lib/decompress/zstd_ddict.c +++ b/lib/decompress/zstd_ddict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -14,12 +14,12 @@ /*-******************************************************* * Dependencies *********************************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ #include "../common/cpu.h" /* bmi2 */ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "zstd_decompress_internal.h" #include "zstd_ddict.h" @@ -240,5 +240,5 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; - return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); + return ddict->dictID; } diff --git a/lib/decompress/zstd_ddict.h b/lib/decompress/zstd_ddict.h index bd03268b508..c4ca8877a07 100644 --- a/lib/decompress/zstd_ddict.h +++ b/lib/decompress/zstd_ddict.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/decompress/zstd_decompress.c b/lib/decompress/zstd_decompress.c index 9fe326e73ea..9eb98327ef3 100644 --- a/lib/decompress/zstd_decompress.c +++ b/lib/decompress/zstd_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -56,17 +56,18 @@ * Dependencies *********************************************************/ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ +#include "../common/error_private.h" +#include "../common/zstd_internal.h" /* blockProperties_t */ #include "../common/mem.h" /* low level memory routines */ +#include "../common/bits.h" /* ZSTD_highbit32 */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/xxhash.h" /* XXH64_reset, XXH64_update, XXH64_digest, XXH64 */ -#include "../common/zstd_internal.h" /* blockProperties_t */ #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */ -#include "../common/bits.h" /* ZSTD_highbit32 */ #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) # include "../legacy/zstd_legacy.h" @@ -244,6 +245,8 @@ static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx) dctx->outBufferMode = ZSTD_bm_buffered; dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum; dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict; + dctx->disableHufAsm = 0; + dctx->maxBlockSizeParam = 0; } static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) @@ -264,6 +267,7 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) #endif dctx->noForwardProgress = 0; dctx->oversizedDuration = 0; + dctx->isFrameDecompression = 1; #if DYNAMIC_BMI2 dctx->bmi2 = ZSTD_cpuSupportsBmi2(); #endif @@ -440,7 +444,7 @@ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` 
amount, ** or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) +size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) { const BYTE* ip = (const BYTE*)src; size_t const minInputSize = ZSTD_startingInputLength(format); @@ -480,8 +484,10 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s if (srcSize < ZSTD_SKIPPABLEHEADERSIZE) return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); - zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); zfhPtr->frameType = ZSTD_skippableFrame; + zfhPtr->dictID = MEM_readLE32(src) - ZSTD_MAGIC_SKIPPABLE_START; + zfhPtr->headerSize = ZSTD_SKIPPABLEHEADERSIZE; + zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); return 0; } RETURN_ERROR(prefix_unknown, ""); @@ -550,7 +556,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, * or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize) +size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize) { return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1); } @@ -568,7 +574,7 @@ unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret; } #endif - { ZSTD_frameHeader zfh; + { ZSTD_FrameHeader zfh; if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR; if (zfh.frameType == ZSTD_skippableFrame) { @@ -588,49 +594,52 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize) sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE); RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32, frameParameter_unsupported, ""); - { - size_t const skippableSize = skippableHeaderSize + sizeU32; + { size_t const skippableSize = skippableHeaderSize + sizeU32; RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, ""); return skippableSize; } } /*! ZSTD_readSkippableFrame() : - * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. + * Retrieves content of a skippable frame, and writes it to dst buffer. * * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested * in the magicVariant. * - * Returns an error if destination buffer is not large enough, or if the frame is not skippable. + * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame. * * @return : number of bytes written or a ZSTD error. 
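+ *
+ * A minimal usage sketch (buffer size and consumer are illustrative):
+ *
+ *     unsigned char payload[1024];
+ *     unsigned variant = 0;
+ *     size_t const r = ZSTD_readSkippableFrame(payload, sizeof(payload),
+ *                                              &variant, src, srcSize);
+ *     if (!ZSTD_isError(r)) handlePayload(payload, r, variant);
+ *
+ * where the frame's magic number was ZSTD_MAGIC_SKIPPABLE_START + variant.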
*/ -ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, - const void* src, size_t srcSize) +size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, + unsigned* magicVariant, /* optional, can be NULL */ + const void* src, size_t srcSize) { - U32 const magicNumber = MEM_readLE32(src); - size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); - size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; - - /* check input validity */ - RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); - RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); - RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); + RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); - /* deliver payload */ - if (skippableContentSize > 0 && dst != NULL) - ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); - if (magicVariant != NULL) - *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; - return skippableContentSize; + { U32 const magicNumber = MEM_readLE32(src); + size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); + size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; + + /* check input validity */ + RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); + RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); + RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); + + /* deliver payload */ + if (skippableContentSize > 0 && dst != NULL) + ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); + if (magicVariant != NULL) + *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; + return skippableContentSize; + } } /** ZSTD_findDecompressedSize() : - * compatible with legacy mode * `srcSize` must be the exact length of some number of ZSTD compressed and/or * skippable frames - * @return : decompressed size of the frames contained */ + * note: compatible with legacy mode + * @return : decompressed size of the frames contained */ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) { unsigned long long totalDstSize = 0; @@ -640,9 +649,7 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { size_t const skippableSize = readSkippableFrameSize(src, srcSize); - if (ZSTD_isError(skippableSize)) { - return ZSTD_CONTENTSIZE_ERROR; - } + if (ZSTD_isError(skippableSize)) return ZSTD_CONTENTSIZE_ERROR; assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; @@ -650,17 +657,17 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) continue; } - { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); - if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; + { unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize); + if (fcs >= ZSTD_CONTENTSIZE_ERROR) return fcs; - /* check for overflow */ - if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; - totalDstSize += ret; + if (totalDstSize + fcs < totalDstSize) + return ZSTD_CONTENTSIZE_ERROR; /* check for overflow */ + totalDstSize += fcs; } + /* skip to next frame */ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); - if 
(ZSTD_isError(frameSrcSize)) { - return ZSTD_CONTENTSIZE_ERROR; - } + if (ZSTD_isError(frameSrcSize)) return ZSTD_CONTENTSIZE_ERROR; + assert(frameSrcSize <= srcSize); src = (const BYTE *)src + frameSrcSize; srcSize -= frameSrcSize; @@ -724,17 +731,17 @@ static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret) return frameSizeInfo; } -static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize) +static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize, ZSTD_format_e format) { ZSTD_frameSizeInfo frameSizeInfo; ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo)); #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (ZSTD_isLegacy(src, srcSize)) + if (format == ZSTD_f_zstd1 && ZSTD_isLegacy(src, srcSize)) return ZSTD_findFrameSizeInfoLegacy(src, srcSize); #endif - if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE) + if (format == ZSTD_f_zstd1 && (srcSize >= ZSTD_SKIPPABLEHEADERSIZE) && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); assert(ZSTD_isError(frameSizeInfo.compressedSize) || @@ -745,10 +752,10 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize const BYTE* const ipstart = ip; size_t remainingSize = srcSize; size_t nbBlocks = 0; - ZSTD_frameHeader zfh; + ZSTD_FrameHeader zfh; /* Extract Frame Header */ - { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize); + { size_t const ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format); if (ZSTD_isError(ret)) return ZSTD_errorFrameSizeInfo(ret); if (ret > 0) @@ -782,28 +789,31 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize ip += 4; } + frameSizeInfo.nbBlocks = nbBlocks; frameSizeInfo.compressedSize = (size_t)(ip - ipstart); frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) ? 
zfh.frameContentSize - : nbBlocks * zfh.blockSizeMax; + : (unsigned long long)nbBlocks * zfh.blockSizeMax; return frameSizeInfo; } } +static size_t ZSTD_findFrameCompressedSize_advanced(const void *src, size_t srcSize, ZSTD_format_e format) { + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format); + return frameSizeInfo.compressedSize; +} + /** ZSTD_findFrameCompressedSize() : - * compatible with legacy mode - * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame - * `srcSize` must be at least as large as the frame contained - * @return : the compressed size of the frame starting at `src` */ + * See docs in zstd.h + * Note: compatible with legacy mode */ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) { - ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); - return frameSizeInfo.compressedSize; + return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_f_zstd1); } /** ZSTD_decompressBound() : * compatible with legacy mode - * `src` must point to the start of a ZSTD frame or a skippeable frame + * `src` must point to the start of a ZSTD frame or a skippable frame * `srcSize` must be at least as large as the frame contained * @return : the maximum decompressed size of the compressed source */ @@ -812,7 +822,7 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) unsigned long long bound = 0; /* Iterate over each frame */ while (srcSize > 0) { - ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); size_t const compressedSize = frameSizeInfo.compressedSize; unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) @@ -825,6 +835,48 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) return bound; } +size_t ZSTD_decompressionMargin(void const* src, size_t srcSize) +{ + size_t margin = 0; + unsigned maxBlockSize = 0; + + /* Iterate over each frame */ + while (srcSize > 0) { + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); + size_t const compressedSize = frameSizeInfo.compressedSize; + unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; + ZSTD_FrameHeader zfh; + + FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), ""); + if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) + return ERROR(corruption_detected); + + if (zfh.frameType == ZSTD_frame) { + /* Add the frame header to our margin */ + margin += zfh.headerSize; + /* Add the checksum to our margin */ + margin += zfh.checksumFlag ? 4 : 0; + /* Add 3 bytes per block */ + margin += 3 * frameSizeInfo.nbBlocks; + + /* Compute the max block size */ + maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax); + } else { + assert(zfh.frameType == ZSTD_skippableFrame); + /* Add the entire skippable frame size to our margin. */ + margin += compressedSize; + } + + assert(srcSize >= compressedSize); + src = (const BYTE*)src + compressedSize; + srcSize -= compressedSize; + } + + /* Add the max block size back to the margin. 
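+ *
+ * For a single standard frame, the bound computed by this loop is
+ *
+ *     margin = headerSize + (checksumFlag ? 4 : 0) + 3 * nbBlocks + maxBlockSize
+ *
+ * e.g. (illustrative numbers) a frame with a 6-byte header, a checksum and
+ * two 128 KB blocks needs 6 + 4 + 6 + 131072 bytes of margin. Skippable
+ * frames instead contribute their whole compressedSize, as above.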
*/ + margin += maxBlockSize; + + return margin; +} /*-************************************************************* * Frame decoding @@ -850,7 +902,7 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, if (srcSize == 0) return 0; RETURN_ERROR(dstBuffer_null, ""); } - ZSTD_memcpy(dst, src, srcSize); + ZSTD_memmove(dst, src, srcSize); return srcSize; } @@ -867,7 +919,7 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, return regenSize; } -static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming) +static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, int streaming) { #if ZSTD_TRACE if (dctx->traceCtx && ZSTD_trace_decompress_end != NULL) { @@ -926,8 +978,13 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize; } + /* Shrink the blockSizeMax if enabled */ + if (dctx->maxBlockSizeParam != 0) + dctx->fParams.blockSizeMax = MIN(dctx->fParams.blockSizeMax, (unsigned)dctx->maxBlockSizeParam); + /* Loop on each block */ while (1) { + BYTE* oBlockEnd = oend; size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); @@ -937,27 +994,48 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, remainingSrcSize -= ZSTD_blockHeaderSize; RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); + if (ip >= op && ip < oBlockEnd) { + /* We are decompressing in-place. Limit the output pointer so that we + * don't overwrite the block that we are currently reading. This will + * fail decompression if the input & output pointers aren't spaced + * far enough apart. + * + * This is important to set, even when the pointers are far enough + * apart, because ZSTD_decompressBlock_internal() can decide to store + * literals in the output buffer, after the block it is decompressing. + * Since we don't want anything to overwrite our input, we have to tell + * ZSTD_decompressBlock_internal to never write past ip. + * + * See ZSTD_allocateLiteralsBuffer() for reference. + */ + oBlockEnd = op + (ip - op); + } + switch(blockProperties.blockType) { case bt_compressed: - decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming); + assert(dctx->isFrameDecompression == 1); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, not_streaming); break; case bt_raw : + /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. 
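+ *
+ * (Context: together with the oBlockEnd clamp above, this memmove is what
+ *  enables in-place decompression. The intended layout, sketched with
+ *  ZSTD_decompressionMargin() from earlier in this file, is roughly:
+ *
+ *      |<--------- decompressed size --------->|<---- margin ---->|
+ *      [ output grows forward ..                                  ]
+ *                                [ compressed input sits at the tail ]
+ *
+ *  so the write pointer never overtakes the unread input.)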
*/ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); break; case bt_rle : - decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize); + decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize); break; case bt_reserved : default: RETURN_ERROR(corruption_detected, "invalid block type"); } - - if (ZSTD_isError(decodedSize)) return decodedSize; - if (dctx->validateChecksum) + FORWARD_IF_ERROR(decodedSize, "Block decompression failure"); + DEBUGLOG(5, "Decompressed block of dSize = %u", (unsigned)decodedSize); + if (dctx->validateChecksum) { XXH64_update(&dctx->xxhState, op, decodedSize); - if (decodedSize != 0) + } + if (decodedSize) /* support dst = NULL,0 */ { op += decodedSize; + } assert(ip != NULL); ip += cBlockSize; remainingSrcSize -= cBlockSize; @@ -981,12 +1059,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, } ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0); /* Allow caller to get size read */ + DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %i, consuming %i bytes of input", (int)(op-ostart), (int)(ip - (const BYTE*)*srcPtr)); *srcPtr = ip; *srcSizePtr = remainingSrcSize; return (size_t)(op-ostart); } -static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, @@ -1006,7 +1087,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, while (srcSize >= ZSTD_startingInputLength(dctx->format)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (ZSTD_isLegacy(src, srcSize)) { + if (dctx->format == ZSTD_f_zstd1 && ZSTD_isLegacy(src, srcSize)) { size_t decodedSize; size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); if (ZSTD_isError(frameSize)) return frameSize; @@ -1016,6 +1097,15 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); if (ZSTD_isError(decodedSize)) return decodedSize; + { + unsigned long long const expectedSize = ZSTD_getFrameContentSize(src, srcSize); + RETURN_ERROR_IF(expectedSize == ZSTD_CONTENTSIZE_ERROR, corruption_detected, "Corrupted frame header!"); + if (expectedSize != ZSTD_CONTENTSIZE_UNKNOWN) { + RETURN_ERROR_IF(expectedSize != decodedSize, corruption_detected, + "Frame header size does not match decoded size!"); + } + } + assert(decodedSize <= dstCapacity); dst = (BYTE*)dst + decodedSize; dstCapacity -= decodedSize; @@ -1027,17 +1117,18 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, } #endif - { U32 const magicNumber = MEM_readLE32(src); - DEBUGLOG(4, "reading magic number %08X (expecting %08X)", - (unsigned)magicNumber, ZSTD_MAGICNUMBER); + if (dctx->format == ZSTD_f_zstd1 && srcSize >= 4) { + U32 const magicNumber = MEM_readLE32(src); + DEBUGLOG(5, "reading magic number %08X", (unsigned)magicNumber); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { + /* skippable frame detected : skip it */ size_t const skippableSize = readSkippableFrameSize(src, srcSize); - FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed"); + FORWARD_IF_ERROR(skippableSize, "invalid skippable frame"); assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; - continue; + continue; /* check next frame */ } } if (ddict) { @@ -1253,7 
+1344,8 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c { case bt_compressed: DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); - rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming); + assert(dctx->isFrameDecompression == 1); + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, is_streaming); dctx->expected = 0; /* Streaming not supported */ break; case bt_raw : @@ -1322,6 +1414,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c case ZSTDds_decodeSkippableHeader: assert(src != NULL); assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE); + assert(dctx->format != ZSTD_f_zstd1_magicless); ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */ dctx->stage = ZSTDds_skipFrame; @@ -1375,11 +1468,11 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, /* in minimal huffman, we always use X1 variants */ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, - workspace, workspaceSize); + workspace, workspaceSize, /* flags */ 0); #else size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable, dictPtr, (size_t)(dictEnd - dictPtr), - workspace, workspaceSize); + workspace, workspaceSize, /* flags */ 0); #endif RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); dictPtr += hSize; @@ -1482,6 +1575,7 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; dctx->bType = bt_reserved; + dctx->isFrameDecompression = 1; ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ dctx->LLTptr = dctx->entropy.LLTable; @@ -1549,7 +1643,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) * ZSTD_getFrameHeader(), which will provide a more precise error code. 
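+ *
+ * A typical lookup on the decoding side (the table helper is hypothetical):
+ *
+ *     unsigned const id = ZSTD_getDictID_fromFrame(src, srcSize);
+ *     ZSTD_DDict const* const ddict = (id != 0) ? findDDictById(id) : NULL;
+ *
+ * a result of 0 only means "no usable dictID": none was declared, or the
+ * header could not be read.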
*/ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { - ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; + ZSTD_FrameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; @@ -1656,7 +1750,9 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di size_t ZSTD_initDStream(ZSTD_DStream* zds) { DEBUGLOG(4, "ZSTD_initDStream"); - return ZSTD_initDStream_usingDDict(zds, NULL); + FORWARD_IF_ERROR(ZSTD_DCtx_reset(zds, ZSTD_reset_session_only), ""); + FORWARD_IF_ERROR(ZSTD_DCtx_refDDict(zds, NULL), ""); + return ZSTD_startingInputLength(zds->format); } /* ZSTD_initDStream_usingDDict() : @@ -1664,6 +1760,7 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds) * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) { + DEBUGLOG(4, "ZSTD_initDStream_usingDDict"); FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); return ZSTD_startingInputLength(dctx->format); @@ -1674,6 +1771,7 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) * this function cannot fail */ size_t ZSTD_resetDStream(ZSTD_DStream* dctx) { + DEBUGLOG(4, "ZSTD_resetDStream"); FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); return ZSTD_startingInputLength(dctx->format); } @@ -1745,6 +1843,15 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict; bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts; return bounds; + case ZSTD_d_disableHuffmanAssembly: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_d_maxBlockSize: + bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; + bounds.upperBound = ZSTD_BLOCKSIZE_MAX; + return bounds; + default:; } bounds.error = ERROR(parameter_unsupported); @@ -1785,6 +1892,12 @@ size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value case ZSTD_d_refMultipleDDicts: *value = (int)dctx->refMultipleDDicts; return 0; + case ZSTD_d_disableHuffmanAssembly: + *value = (int)dctx->disableHufAsm; + return 0; + case ZSTD_d_maxBlockSize: + *value = dctx->maxBlockSizeParam; + return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); @@ -1818,6 +1931,14 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value } dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; return 0; + case ZSTD_d_disableHuffmanAssembly: + CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value); + dctx->disableHufAsm = value != 0; + return 0; + case ZSTD_d_maxBlockSize: + if (value != 0) CHECK_DBOUNDS(ZSTD_d_maxBlockSize, value); + dctx->maxBlockSizeParam = value; + return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); @@ -1829,6 +1950,7 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) || (reset == ZSTD_reset_session_and_parameters) ) { dctx->streamStage = zdss_init; dctx->noForwardProgress = 0; + dctx->isFrameDecompression = 1; } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { @@ -1845,11 +1967,17 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) return ZSTD_sizeof_DCtx(dctx); } -size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) +static size_t ZSTD_decodingBufferSize_internal(unsigned long long windowSize, unsigned long long frameContentSize, size_t 
blockSizeMax) { - size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); - /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy*/ - unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2); + size_t const blockSize = MIN((size_t)MIN(windowSize, ZSTD_BLOCKSIZE_MAX), blockSizeMax); + /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block + * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing + * the block at the beginning of the output buffer, and maintain a full window. + * + * We need another blockSize worth of buffer so that we can store split + * literals at the end of the block without overwriting the extDict window. + */ + unsigned long long const neededRBSize = windowSize + (blockSize * 2) + (WILDCOPY_OVERLENGTH * 2); unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); size_t const minRBSize = (size_t) neededSize; RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, @@ -1857,6 +1985,11 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long return minRBSize; } +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) +{ + return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, ZSTD_BLOCKSIZE_MAX); +} + size_t ZSTD_estimateDStreamSize(size_t windowSize) { size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); @@ -1868,7 +2001,7 @@ size_t ZSTD_estimateDStreamSize(size_t windowSize) size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) { U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ - ZSTD_frameHeader zfh; + ZSTD_FrameHeader zfh; size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); if (ZSTD_isError(err)) return err; RETURN_ERROR_IF(err>0, srcSize_wrong, ""); @@ -1963,6 +2096,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB U32 someMoreWork = 1; DEBUGLOG(5, "ZSTD_decompressStream"); + assert(zds != NULL); RETURN_ERROR_IF( input->pos > input->size, srcSize_wrong, @@ -2052,12 +2186,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && zds->fParams.frameType != ZSTD_skippableFrame && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { - size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart)); + size_t const cSize = ZSTD_findFrameCompressedSize_advanced(istart, (size_t)(iend-istart), zds->format); if (cSize <= (size_t)(iend-istart)) { /* shortcut : using single-pass mode */ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds)); if (ZSTD_isError(decompressedSize)) return decompressedSize; - DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()") + DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()"); + assert(istart != NULL); ip = istart + cSize; op = op ? 
op + decompressedSize : op; /* can occur if frameContentSize = 0 (empty frame) */ zds->expected = 0; @@ -2078,7 +2213,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB DEBUGLOG(4, "Consume header"); FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), ""); - if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + if (zds->format == ZSTD_f_zstd1 + && (MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE); zds->stage = ZSTDds_skipFrame; } else { @@ -2094,11 +2230,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize, frameParameter_windowTooLarge, ""); + if (zds->maxBlockSizeParam != 0) + zds->fParams.blockSizeMax = MIN(zds->fParams.blockSizeMax, (unsigned)zds->maxBlockSizeParam); /* Adapt buffer sizes to frame header instructions */ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered - ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize) + ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax) : 0; ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); @@ -2143,6 +2281,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); + assert(ip != NULL); ip += neededInSize; /* Function modifies the stage so we must break */ break; @@ -2157,7 +2296,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t loadedSize; /* At this point we shouldn't be decompressing a block that we can stream. 
*/ - assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip)); + assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip))); if (isSkipFrame) { loadedSize = MIN(toLoad, (size_t)(iend-ip)); } else { @@ -2166,8 +2305,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB "should never happen"); loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip)); } - ip += loadedSize; - zds->inPos += loadedSize; + if (loadedSize != 0) { + /* ip may be NULL */ + ip += loadedSize; + zds->inPos += loadedSize; + } if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ @@ -2214,8 +2356,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB if ((ip==istart) && (op==ostart)) { /* no forward progress */ zds->noForwardProgress ++; if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { - RETURN_ERROR_IF(op==oend, dstSize_tooSmall, ""); - RETURN_ERROR_IF(ip==iend, srcSize_wrong, ""); + RETURN_ERROR_IF(op==oend, noForwardProgress_destFull, ""); + RETURN_ERROR_IF(ip==iend, noForwardProgress_inputEmpty, ""); assert(0); } } else { @@ -2252,11 +2394,17 @@ size_t ZSTD_decompressStream_simpleArgs ( void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos) { - ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; - ZSTD_inBuffer input = { src, srcSize, *srcPos }; - /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ - size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; + ZSTD_outBuffer output; + ZSTD_inBuffer input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + { size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } } diff --git a/lib/decompress/zstd_decompress_block.c b/lib/decompress/zstd_decompress_block.c index e1ff21582c5..56e1f9eda28 100644 --- a/lib/decompress/zstd_decompress_block.c +++ b/lib/decompress/zstd_decompress_block.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,15 +16,13 @@ *********************************************************/ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ #include "../common/compiler.h" /* prefetch */ -#include "../common/cpu.h" /* bmi2 */ #include "../common/mem.h" /* low level memory routines */ +#include #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/zstd_internal.h" #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ -#include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" #include "../common/bits.h" /* ZSTD_highbit32 */ @@ -52,6 +50,13 @@ static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); } * Block decoding ***************************************************************/ +static size_t ZSTD_blockSizeMax(ZSTD_DCtx const* dctx) +{ + size_t const blockSizeMax = dctx->isFrameDecompression ? 
dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX; + assert(blockSizeMax <= ZSTD_BLOCKSIZE_MAX); + return blockSizeMax; +} + /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, @@ -74,41 +79,49 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize, const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately) { - if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) - { - /* room for litbuffer to fit without read faulting */ - dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH; + size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); + assert(litSize <= blockSizeMax); + assert(dctx->isFrameDecompression || streaming == not_streaming); + assert(expectedWriteSize <= blockSizeMax); + if (streaming == not_streaming && dstCapacity > blockSizeMax + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) { + /* If we aren't streaming, we can just put the literals after the output + * of the current block. We don't need to worry about overwriting the + * extDict of our window, because it doesn't exist. + * So if we have space after the end of the block, just put it there. + */ + dctx->litBuffer = (BYTE*)dst + blockSizeMax + WILDCOPY_OVERLENGTH; dctx->litBufferEnd = dctx->litBuffer + litSize; dctx->litBufferLocation = ZSTD_in_dst; - } - else if (litSize > ZSTD_LITBUFFEREXTRASIZE) - { - /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */ + } else if (litSize <= ZSTD_LITBUFFEREXTRASIZE) { + /* Literals fit entirely within the extra buffer, put them there to avoid + * having to split the literals. + */ + dctx->litBuffer = dctx->litExtraBuffer; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_not_in_dst; + } else { + assert(blockSizeMax > ZSTD_LITBUFFEREXTRASIZE); + /* Literals must be split between the output block and the extra lit + * buffer. We fill the extra lit buffer with the tail of the literals, + * and put the rest of the literals at the end of the block, with + * WILDCOPY_OVERLENGTH of buffer room to allow for overreads. + * This MUST not write more than our maxBlockSize beyond dst, because in + * streaming mode, that could overwrite part of our extDict window. 
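+ *
+ * Roughly, for the splitImmediately case below:
+ *
+ *     dst:            [ ..block output.. | first litSize - ZSTD_LITBUFFEREXTRASIZE literal bytes | slack ]
+ *     litExtraBuffer: [ last ZSTD_LITBUFFEREXTRASIZE literal bytes ]
+ *
+ * where the slack is WILDCOPY_OVERLENGTH bytes, kept so wide copies can run
+ * past the literals without touching anything live.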
+ */ if (splitImmediately) { /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE; - } - else { + } else { /* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize; dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize; } dctx->litBufferLocation = ZSTD_split; - } - else - { - /* fits entirely within litExtraBuffer, so no split is necessary */ - dctx->litBuffer = dctx->litExtraBuffer; - dctx->litBufferEnd = dctx->litBuffer + litSize; - dctx->litBufferLocation = ZSTD_not_in_dst; + assert(dctx->litBufferEnd <= (BYTE*)dst + expectedWriteSize); } } -/* Hidden declaration for fullbench */ -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, - const void* src, size_t srcSize, - void* dst, size_t dstCapacity, const streaming_operation streaming); /*! ZSTD_decodeLiteralsBlock() : * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current @@ -117,7 +130,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, * * @return : nb of bytes read from src (< srcSize ) * note : symbol not declared but exposed for fullbench */ -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, +static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */ void* dst, size_t dstCapacity, const streaming_operation streaming) { @@ -125,7 +138,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); { const BYTE* const istart = (const BYTE*) src; - symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); + SymbolEncodingType_e const litEncType = (SymbolEncodingType_e)(istart[0] & 3); + size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); switch(litEncType) { @@ -141,7 +155,10 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); size_t hufSuccess; - size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); + size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); + int const flags = 0 + | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0) + | (dctx->disableHufAsm ? 
HUF_flags_disableAsm : 0); switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -165,7 +182,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, break; } RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); - RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); + RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); + if (!singleStream) + RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong, + "Not enough literals (%zu) for the 4-streams mode (min %u)", + litSize, MIN_LITERALS_FOR_4_STREAMS); RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, ""); ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); @@ -177,13 +198,14 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, if (litEncType==set_repeat) { if (singleStream) { - hufSuccess = HUF_decompress1X_usingDTable_bmi2( + hufSuccess = HUF_decompress1X_usingDTable( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); + dctx->HUFptr, flags); } else { - hufSuccess = HUF_decompress4X_usingDTable_bmi2( + assert(litSize >= MIN_LITERALS_FOR_4_STREAMS); + hufSuccess = HUF_decompress4X_usingDTable( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); + dctx->HUFptr, flags); } } else { if (singleStream) { @@ -191,26 +213,28 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, hufSuccess = HUF_decompress1X_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace)); + sizeof(dctx->workspace), flags); #else - hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( + hufSuccess = HUF_decompress1X1_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); + sizeof(dctx->workspace), flags); #endif } else { - hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( + hufSuccess = HUF_decompress4X_hufOnly_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); + sizeof(dctx->workspace), flags); } } if (dctx->litBufferLocation == ZSTD_split) { + assert(litSize > ZSTD_LITBUFFEREXTRASIZE); ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE); dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; dctx->litBufferEnd -= WILDCOPY_OVERLENGTH; + assert(dctx->litBufferEnd <= (BYTE*)dst + blockSizeMax); } RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); @@ -225,7 +249,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, case set_basic: { size_t litSize, lhSize; U32 const lhlCode = ((istart[0]) >> 2) & 3; - size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); + size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -244,6 +268,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, } RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); 
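+ /* Worked example of the check above (illustrative numbers): if
+  * ZSTD_d_maxBlockSize shrank this frame's blockSizeMax to 16384, a raw
+  * literals section declaring litSize = 20000 is rejected as corruption
+  * here, even though plain ZSTD_BLOCKSIZE_MAX (128 KB) would accept it. */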
RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ @@ -272,7 +297,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, case set_rle: { U32 const lhlCode = ((istart[0]) >> 2) & 3; size_t litSize, lhSize; - size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); + size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -291,7 +316,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, break; } RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); - RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); + RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); if (dctx->litBufferLocation == ZSTD_split) @@ -313,6 +338,18 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, } } +/* Hidden declaration for fullbench */ +size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx, + const void* src, size_t srcSize, + void* dst, size_t dstCapacity); +size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx, + const void* src, size_t srcSize, + void* dst, size_t dstCapacity) +{ + dctx->isFrameDecompression = 0; + return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, not_streaming); +} + /* Default FSE distribution tables. * These are pre-calculated FSE decoding tables using default distributions as defined in specification : * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions @@ -320,7 +357,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, * - start from default distributions, present in /lib/common/zstd_internal.h * - generate tables normally, using ZSTD_buildFSETable() * - printout the content of tables - * - pretify output, report below, test with fuzzer to ensure it's correct */ + * - prettify output, report below, test with fuzzer to ensure it's correct */ /* Default FSE distribution table for Literal Lengths */ static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = { [... LL/OF/ML default tables and the start of ZSTD_buildFSETable missing from this excerpt ...] + assert(n>=0); + pos += (size_t)n; } } /* Now we spread those positions across the table.
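/* ---- Illustrative aside (not part of the patch) ----
 * "Spread those positions across the table", per the context line above,
 * means walking the table with a fixed step that is odd, hence coprime
 * with the power-of-two table size, so every cell is visited exactly once.
 * Below is the classic single-pass form of that spread as a sketch; the
 * function being patched uses a faster two-stage variant (the tail of
 * which is visible above) and places low-probability symbols separately.
 * Names and types are illustrative; FSE_TABLESTEP follows the formula used
 * by the FSE code. */
#include <stddef.h>

#define FSE_TABLESTEP(tableSize) (((tableSize) >> 1) + ((tableSize) >> 3) + 3)

static void spreadSymbols(unsigned* cellSymbol,           /* 1 << tableLog cells */
                          unsigned tableLog,
                          const short* normalizedCounter, /* occurrences per symbol */
                          unsigned maxSymbolValue)
{
    size_t const tableSize = (size_t)1 << tableLog;
    size_t const tableMask = tableSize - 1;
    size_t const step = FSE_TABLESTEP(tableSize);
    size_t pos = 0;
    unsigned s;
    for (s = 0; s <= maxSymbolValue; s++) {
        int i;
        for (i = 0; i < normalizedCounter[s]; i++) {
            cellSymbol[pos] = s;            /* place one occurrence of symbol s */
            pos = (pos + step) & tableMask; /* odd step cycles through all cells */
        }
    }
    /* When the counts sum to tableSize, pos has cycled back to 0 and every
     * cell received exactly one symbol. The variable-length inner loop here
     * is precisely what the two-stage version avoids, to reduce branch
     * misses. */
}
/* ---- end aside ---- */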
@@ -606,7 +644,7 @@ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, * @return : nb bytes read from src, * or an error code if it fails */ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr, - symbolEncodingType_e type, unsigned max, U32 maxLog, + SymbolEncodingType_e type, unsigned max, U32 maxLog, const void* src, size_t srcSize, const U32* baseValue, const U8* nbAdditionalBits, const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable, @@ -667,11 +705,6 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, /* SeqHead */ nbSeq = *ip++; - if (!nbSeq) { - *nbSeqPtr=0; - RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, ""); - return 1; - } if (nbSeq > 0x7F) { if (nbSeq == 0xFF) { RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, ""); @@ -684,17 +717,26 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, } *nbSeqPtr = nbSeq; + if (nbSeq == 0) { + /* No sequence : section ends immediately */ + RETURN_ERROR_IF(ip != iend, corruption_detected, + "extraneous data present in the Sequences section"); + return (size_t)(ip - istart); + } + /* FSE table descriptors */ RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */ - { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); - symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); - symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); + RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. */ + { SymbolEncodingType_e const LLtype = (SymbolEncodingType_e)(*ip >> 6); + SymbolEncodingType_e const OFtype = (SymbolEncodingType_e)((*ip >> 4) & 3); + SymbolEncodingType_e const MLtype = (SymbolEncodingType_e)((*ip >> 2) & 3); ip++; /* Build DTables */ + assert(ip <= iend); { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, - ip, iend-ip, + ip, (size_t)(iend-ip), LL_base, LL_bits, LL_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, @@ -704,9 +746,10 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, ip += llhSize; } + assert(ip <= iend); { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, - ip, iend-ip, + ip, (size_t)(iend-ip), OF_base, OF_bits, OF_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, @@ -716,9 +759,10 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, ip += ofhSize; } + assert(ip <= iend); { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, - ip, iend-ip, + ip, (size_t)(iend-ip), ML_base, ML_bits, ML_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, @@ -729,7 +773,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, } } - return ip-istart; + return (size_t)(ip-istart); } @@ -759,7 +803,8 @@ typedef struct { * Precondition: *ip <= *op * Postcondition: *op - *op >= 8 */ -HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { +HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) +{ assert(*ip <= *op); if (offset < 8) { /* close range match, overlap */ @@ -792,7 +837,9 @@ HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. * The src buffer must be before the dst buffer. 
*/ -static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { +static void +ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, size_t length, ZSTD_overlap_e ovtype) +{ ptrdiff_t const diff = op - ip; BYTE* const oend = op + length; @@ -807,7 +854,8 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt if (ovtype == ZSTD_overlap_src_before_dst) { /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ assert(length >= 8); - ZSTD_overlapCopy8(&op, &ip, diff); + assert(diff > 0); + ZSTD_overlapCopy8(&op, &ip, (size_t)diff); length -= 8; assert(op - ip >= 8); assert(op <= oend); @@ -821,7 +869,7 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt if (op <= oend_w) { /* Wildcopy until we get close to the end. */ assert(oend > oend_w); - ZSTD_wildcopy(op, ip, oend_w - op, ovtype); + ZSTD_wildcopy(op, ip, (size_t)(oend_w - op), ovtype); ip += oend_w - op; op += oend_w - op; } @@ -832,7 +880,8 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt /* ZSTD_safecopyDstBeforeSrc(): * This version allows overlap with dst before src, or handles the non-overlap case with dst after src * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */ -static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) { +static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, size_t length) +{ ptrdiff_t const diff = op - ip; BYTE* const oend = op + length; @@ -843,7 +892,7 @@ static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length } if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) { - ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap); + ZSTD_wildcopy(op, ip, (size_t)(oend - WILDCOPY_OVERLENGTH - op), ZSTD_no_overlap); ip += oend - WILDCOPY_OVERLENGTH - op; op += oend - WILDCOPY_OVERLENGTH - op; } @@ -861,6 +910,7 @@ static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). */ FORCE_NOINLINE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequenceEnd(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, @@ -893,11 +943,11 @@ size_t ZSTD_execSequenceEnd(BYTE* op, return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; + { size_t const length1 = (size_t)(dictEnd - match); + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; } } ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); @@ -908,6 +958,7 @@ size_t ZSTD_execSequenceEnd(BYTE* op, * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case. 
*/ FORCE_NOINLINE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, BYTE* const oend, const BYTE* const oend_w, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, @@ -941,11 +992,11 @@ size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; + { size_t const length1 = (size_t)(dictEnd - match); + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; } } ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); @@ -953,6 +1004,7 @@ size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, } HINT_INLINE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequence(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, @@ -1013,11 +1065,11 @@ size_t ZSTD_execSequence(BYTE* op, return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; + { size_t const length1 = (size_t)(dictEnd - match); + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; } } /* Match within prefix of 1 or more bytes */ @@ -1034,7 +1086,7 @@ size_t ZSTD_execSequence(BYTE* op, * longer than literals (in general). In silesia, ~10% of matches are longer * than 16 bytes. */ - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); + ZSTD_wildcopy(op, match, sequence.matchLength, ZSTD_no_overlap); return sequenceLength; } assert(sequence.offset < WILDCOPY_VECLEN); @@ -1045,12 +1097,13 @@ size_t ZSTD_execSequence(BYTE* op, /* If the match length is > 8 bytes, then continue with the wildcopy. */ if (sequence.matchLength > 8) { assert(op < oMatchEnd); - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst); + ZSTD_wildcopy(op, match, sequence.matchLength - 8, ZSTD_overlap_src_before_dst); } return sequenceLength; } HINT_INLINE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, BYTE* const oend, const BYTE* const oend_w, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, @@ -1105,7 +1158,7 @@ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; + { size_t const length1 = (size_t)(dictEnd - match); ZSTD_memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; @@ -1125,7 +1178,7 @@ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, * longer than literals (in general). In silesia, ~10% of matches are longer * than 16 bytes. */ - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); + ZSTD_wildcopy(op, match, sequence.matchLength, ZSTD_no_overlap); return sequenceLength; } assert(sequence.offset < WILDCOPY_VECLEN); @@ -1136,7 +1189,7 @@ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, /* If the match length is > 8 bytes, then continue with the wildcopy. 
*/ if (sequence.matchLength > 8) { assert(op < oMatchEnd); - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); + ZSTD_wildcopy(op, match, sequence.matchLength-8, ZSTD_overlap_src_before_dst); } return sequenceLength; } @@ -1162,7 +1215,7 @@ ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 } /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum - * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1) + * offset bits. But we can only read at most STREAM_ACCUMULATOR_MIN_32 * bits before reloading. This value is the maximum number of bytes we read * after reloading when we are decoding long offsets. */ @@ -1173,19 +1226,29 @@ ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; +/** + * ZSTD_decodeSequence(): + * @p longOffsets : tells the decoder to reload more bit while decoding large offsets + * only used in 32-bit mode + * @return : Sequence (litL + matchL + offset) + */ FORCE_INLINE_TEMPLATE seq_t -ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) +ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const int isLastSeq) { seq_t seq; +#if defined(__aarch64__) + size_t prevOffset0 = seqState->prevOffset[0]; + size_t prevOffset1 = seqState->prevOffset[1]; + size_t prevOffset2 = seqState->prevOffset[2]; /* - * ZSTD_seqSymbol is a structure with a total of 64 bits wide. So it can be - * loaded in one operation and extracted its fields by simply shifting or - * bit-extracting on aarch64. + * ZSTD_seqSymbol is a 64 bits wide structure. + * It can be loaded in one operation + * and its fields extracted by simply shifting or bit-extracting on aarch64. * GCC doesn't recognize this and generates more unnecessary ldr/ldrb/ldrh * operations that cause performance drop. This can be avoided by using this * ZSTD_memcpy hack. 
*/ -#if defined(__aarch64__) && (defined(__GNUC__) && !defined(__clang__)) +# if defined(__GNUC__) && !defined(__clang__) ZSTD_seqSymbol llDInfoS, mlDInfoS, ofDInfoS; ZSTD_seqSymbol* const llDInfo = &llDInfoS; ZSTD_seqSymbol* const mlDInfo = &mlDInfoS; @@ -1193,11 +1256,102 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) ZSTD_memcpy(llDInfo, seqState->stateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol)); ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol)); ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol)); -#else +# else + const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; + const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; + const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; +# endif + (void)longOffsets; + seq.matchLength = mlDInfo->baseValue; + seq.litLength = llDInfo->baseValue; + { U32 const ofBase = ofDInfo->baseValue; + BYTE const llBits = llDInfo->nbAdditionalBits; + BYTE const mlBits = mlDInfo->nbAdditionalBits; + BYTE const ofBits = ofDInfo->nbAdditionalBits; + BYTE const totalBits = llBits+mlBits+ofBits; + + U16 const llNext = llDInfo->nextState; + U16 const mlNext = mlDInfo->nextState; + U16 const ofNext = ofDInfo->nextState; + U32 const llnbBits = llDInfo->nbBits; + U32 const mlnbBits = mlDInfo->nbBits; + U32 const ofnbBits = ofDInfo->nbBits; + + assert(llBits <= MaxLLBits); + assert(mlBits <= MaxMLBits); + assert(ofBits <= MaxOff); + /* As GCC has better branch and block analyzers, sometimes it is only + * valuable to mark likeliness for Clang. + */ + + /* sequence */ + { size_t offset; + if (ofBits > 1) { + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits); + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + prevOffset2 = prevOffset1; + prevOffset1 = prevOffset0; + prevOffset0 = offset; + } else { + U32 const ll0 = (llDInfo->baseValue == 0); + if (LIKELY((ofBits == 0))) { + if (ll0) { + offset = prevOffset1; + prevOffset1 = prevOffset0; + prevOffset0 = offset; + } else { + offset = prevOffset0; + } + } else { + offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); + { size_t temp = (offset == 1) ? prevOffset1 + : (offset == 3) ? prevOffset0 - 1 + : (offset >= 2) ? prevOffset2 + : prevOffset0; + /* 0 is not valid: input corrupted => force offset to -1 => + * corruption detected at execSequence. + */ + temp -= !temp; + prevOffset2 = (offset == 1) ? prevOffset2 : prevOffset1; + prevOffset1 = prevOffset0; + prevOffset0 = offset = temp; + } } } + seq.offset = offset; + } + + if (mlBits > 0) + seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); + + if (UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + + /* Ensure there are enough bits to read the rest of data in 64-bit mode. 
*/ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); + + if (llBits > 0) + seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); + + DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + + if (!isLastSeq) { + /* Don't update FSE state for last sequence. */ + ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ + BIT_reloadDStream(&seqState->DStream); + } + } + seqState->prevOffset[0] = prevOffset0; + seqState->prevOffset[1] = prevOffset1; + seqState->prevOffset[2] = prevOffset2; +#else /* !defined(__aarch64__) */ const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; -#endif seq.matchLength = mlDInfo->baseValue; seq.litLength = llDInfo->baseValue; { U32 const ofBase = ofDInfo->baseValue; @@ -1212,28 +1366,29 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) U32 const llnbBits = llDInfo->nbBits; U32 const mlnbBits = mlDInfo->nbBits; U32 const ofnbBits = ofDInfo->nbBits; - /* - * As gcc has better branch and block analyzers, sometimes it is only - * valuable to mark likeliness for clang, it gives around 3-4% of - * performance. + + assert(llBits <= MaxLLBits); + assert(mlBits <= MaxMLBits); + assert(ofBits <= MaxOff); + /* As GCC has better branch and block analyzers, sometimes it is only + * valuable to mark likeliness for Clang. */ /* sequence */ { size_t offset; - #if defined(__clang__) - if (LIKELY(ofBits > 1)) { - #else if (ofBits > 1) { - #endif ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); - assert(ofBits <= MaxOff); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits); if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { - U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); + /* Always read extra bits, this keeps the logic simple, + * avoids branches, and avoids accidentally reading 0 bits. + */ + U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32; offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); BIT_reloadDStream(&seqState->DStream); - if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); - assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ + offset += BIT_readBitsFast(&seqState->DStream, extraBits); } else { offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); @@ -1250,7 +1405,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) } else { offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); { size_t temp = (offset==3) ? 
seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; - temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + temp -= !temp; /* 0 is not valid: input corrupted => force offset to -1 => corruption detected at execSequence */ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset = temp; @@ -1258,11 +1413,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) seq.offset = offset; } - #if defined(__clang__) - if (UNLIKELY(mlBits > 0)) - #else if (mlBits > 0) - #endif seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) @@ -1272,11 +1423,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); - #if defined(__clang__) - if (UNLIKELY(llBits > 0)) - #else if (llBits > 0) - #endif seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); if (MEM_32bits()) @@ -1285,17 +1432,23 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); - ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ - ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ + if (!isLastSeq) { + /* Don't update FSE state for last sequence. */ + ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ + BIT_reloadDStream(&seqState->DStream); + } } +#endif /* defined(__aarch64__) */ return seq; } -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd) +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) +#if DEBUGLEVEL >= 1 +static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd) { size_t const windowSize = dctx->fParams.windowSize; /* No dictionary used. */ @@ -1309,30 +1462,33 @@ MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefix /* Dictionary is active. 
*/ return 1; } +#endif -MEM_STATIC void ZSTD_assertValidSequence( +static void ZSTD_assertValidSequence( ZSTD_DCtx const* dctx, BYTE const* op, BYTE const* oend, seq_t const seq, BYTE const* prefixStart, BYTE const* virtualStart) { #if DEBUGLEVEL >= 1 - size_t const windowSize = dctx->fParams.windowSize; - size_t const sequenceSize = seq.litLength + seq.matchLength; - BYTE const* const oLitEnd = op + seq.litLength; - DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u", - (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); - assert(op <= oend); - assert((size_t)(oend - op) >= sequenceSize); - assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX); - if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) { - size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing); - /* Offset must be within the dictionary. */ - assert(seq.offset <= (size_t)(oLitEnd - virtualStart)); - assert(seq.offset <= windowSize + dictSize); - } else { - /* Offset must be within our window. */ - assert(seq.offset <= windowSize); + if (dctx->isFrameDecompression) { + size_t const windowSize = dctx->fParams.windowSize; + size_t const sequenceSize = seq.litLength + seq.matchLength; + BYTE const* const oLitEnd = op + seq.litLength; + DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + assert(op <= oend); + assert((size_t)(oend - op) >= sequenceSize); + assert(sequenceSize <= ZSTD_blockSizeMax(dctx)); + if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) { + size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing); + /* Offset must be within the dictionary. */ + assert(seq.offset <= (size_t)(oLitEnd - virtualStart)); + assert(seq.offset <= windowSize + dictSize); + } else { + /* Offset must be within our window. 
*/ + assert(seq.offset <= windowSize); + } } #else (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart; @@ -1348,29 +1504,25 @@ DONT_VECTORIZE ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - const BYTE* ip = (const BYTE*)seqStart; - const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + maxDstSize; + BYTE* const oend = (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize); BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const vBase = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); - DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer"); - (void)frame; + DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer (%i seqs)", nbSeq); - /* Regen sequences */ + /* Literals are split between internal buffer & output buffer */ if (nbSeq) { seqState_t seqState; dctx->fseEntropy = 1; { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } RETURN_ERROR_IF( - ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), + ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)), corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); @@ -1383,8 +1535,7 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, BIT_DStream_completed < BIT_DStream_overflow); /* decompress without overrunning litPtr begins */ - { - seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + { seq_t sequence = {0,0,0}; /* some static analyzer believe that @sequence is not initialized (it necessarily is, since for(;;) loop as at least one iteration) */ /* Align the decompression loop to 32 + 16 bytes.
* * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression @@ -1446,27 +1597,27 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, #endif /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */ - for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) { - size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); + for ( ; nbSeq; nbSeq--) { + sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); + if (litPtr + sequence.litLength > dctx->litBufferEnd) break; + { size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) - assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); #endif - if (UNLIKELY(ZSTD_isError(oneSeqSize))) - return oneSeqSize; - DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); - op += oneSeqSize; - if (UNLIKELY(!--nbSeq)) - break; - BIT_reloadDStream(&(seqState.DStream)); - sequence = ZSTD_decodeSequence(&seqState, isLongOffset); - } + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + } } + DEBUGLOG(6, "reached: (litPtr + sequence.litLength > dctx->litBufferEnd)"); /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */ if (nbSeq > 0) { - const size_t leftoverLit = dctx->litBufferEnd - litPtr; - if (leftoverLit) - { + const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr); + assert(dctx->litBufferEnd >= litPtr); + DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength); + if (leftoverLit) { RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); sequence.litLength -= leftoverLit; @@ -1475,24 +1626,22 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, litPtr = dctx->litExtraBuffer; litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; dctx->litBufferLocation = ZSTD_not_in_dst; - { - size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); + { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); + ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); #endif if (UNLIKELY(ZSTD_isError(oneSeqSize))) return oneSeqSize; DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); op += oneSeqSize; - if (--nbSeq) - BIT_reloadDStream(&(seqState.DStream)); } + nbSeq--; } } - if (nbSeq > 0) /* there is remaining lit from extra buffer */ - { + if (nbSeq > 0) { + /* there is remaining lit from extra buffer */ #if defined(__GNUC__) 
&& defined(__x86_64__) __asm__(".p2align 6"); @@ -1511,35 +1660,34 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, # endif #endif - for (; ; ) { - seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + for ( ; nbSeq ; nbSeq--) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); + ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); #endif if (UNLIKELY(ZSTD_isError(oneSeqSize))) return oneSeqSize; DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); op += oneSeqSize; - if (UNLIKELY(!--nbSeq)) - break; - BIT_reloadDStream(&(seqState.DStream)); } } /* check if reached exact end */ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq); RETURN_ERROR_IF(nbSeq, corruption_detected, ""); - RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, ""); + DEBUGLOG(5, "bitStream : start=%p, ptr=%p, bitsConsumed=%u", seqState.DStream.start, seqState.DStream.ptr, seqState.DStream.bitsConsumed); + RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, ""); /* save reps for next block */ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ - if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */ - { - size_t const lastLLSize = litBufferEnd - litPtr; + if (dctx->litBufferLocation == ZSTD_split) { + /* split hasn't been reached yet, first get dst then copy litExtraBuffer */ + size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); + DEBUGLOG(6, "copy last literals from segment : %u", (U32)lastLLSize); RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); if (op != NULL) { ZSTD_memmove(op, litPtr, lastLLSize); @@ -1549,15 +1697,17 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; dctx->litBufferLocation = ZSTD_not_in_dst; } - { size_t const lastLLSize = litBufferEnd - litPtr; + /* copy last literals from internal buffer */ + { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); + DEBUGLOG(6, "copy last literals from internal buffer : %u", (U32)lastLLSize); RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { ZSTD_memcpy(op, litPtr, lastLLSize); op += lastLLSize; - } - } + } } - return op-ostart; + DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart)); + return (size_t)(op - ostart); } FORCE_INLINE_TEMPLATE size_t @@ -1565,13 +1715,12 @@ DONT_VECTORIZE ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - const BYTE* ip = (const BYTE*)seqStart; - const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ostart + maxDstSize : dctx->litBuffer; + BYTE* const oend = (dctx->litBufferLocation == ZSTD_not_in_dst) ? 
+ (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize) : + dctx->litBuffer; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; @@ -1579,7 +1728,6 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, const BYTE* const vBase = (const BYTE*)(dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd); DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq); - (void)frame; /* Regen sequences */ if (nbSeq) { @@ -1587,18 +1735,13 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, dctx->fseEntropy = 1; { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } RETURN_ERROR_IF( - ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)), + ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)), corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); assert(dst != NULL); - ZSTD_STATIC_ASSERT( - BIT_DStream_unfinished < BIT_DStream_completed && - BIT_DStream_endOfBuffer < BIT_DStream_completed && - BIT_DStream_completed < BIT_DStream_overflow); - #if defined(__GNUC__) && defined(__x86_64__) __asm__(".p2align 6"); __asm__("nop"); @@ -1613,74 +1756,71 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, # endif #endif - for ( ; ; ) { - seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + for ( ; nbSeq ; nbSeq--) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); + ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); #endif if (UNLIKELY(ZSTD_isError(oneSeqSize))) return oneSeqSize; DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); op += oneSeqSize; - if (UNLIKELY(!--nbSeq)) - break; - BIT_reloadDStream(&(seqState.DStream)); } /* check if reached exact end */ - DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq); - RETURN_ERROR_IF(nbSeq, corruption_detected, ""); - RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, ""); + assert(nbSeq == 0); + RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, ""); /* save reps for next block */ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ - { size_t const lastLLSize = litEnd - litPtr; + { size_t const lastLLSize = (size_t)(litEnd - litPtr); + DEBUGLOG(6, "copy last literals : %u", (U32)lastLLSize); RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { ZSTD_memcpy(op, litPtr, lastLLSize); op += lastLLSize; - } - } + } } - return op-ostart; + DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart)); + return (size_t)(op - ostart); } static size_t ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, 
nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } static size_t ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT -FORCE_INLINE_TEMPLATE size_t -ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence, +FORCE_INLINE_TEMPLATE + +size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence, const BYTE* const prefixStart, const BYTE* const dictEnd) { prefetchPos += sequence.litLength; { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart; - const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. - * No consequence though : memory address is only used for prefetching, not for dereferencing */ - PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */ + /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. + * No consequence though : memory address is only used for prefetching, not for dereferencing */ + const BYTE* const match = (const BYTE*)ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, (ptrdiff_t)prefetchPos), (ptrdiff_t)sequence.offset); + PREFETCH_L1(match); PREFETCH_L1(ZSTD_wrappedPtrAdd(match, CACHELINE_SIZE)); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */ } return prefetchPos + sequence.matchLength; } @@ -1694,20 +1834,18 @@ ZSTD_decompressSequencesLong_body( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - const BYTE* ip = (const BYTE*)seqStart; - const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ostart + maxDstSize; + BYTE* const oend = (dctx->litBufferLocation == ZSTD_in_dst) ? 
+ dctx->litBuffer : + (BYTE*)ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize); BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); - (void)frame; /* Regen sequences */ if (nbSeq) { @@ -1723,33 +1861,29 @@ ZSTD_decompressSequencesLong_body( dctx->fseEntropy = 1; { int i; for (i=0; ientropy.rep[i]; } assert(dst != NULL); - assert(iend >= ip); RETURN_ERROR_IF( - ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), + ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)), corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); /* prepare in advance */ - for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNblitBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) - { + if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) { /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ - const size_t leftoverLit = dctx->litBufferEnd - litPtr; - if (leftoverLit) - { + const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr); + assert(dctx->litBufferEnd >= litPtr); + if (leftoverLit) { RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit; @@ -1758,26 +1892,26 @@ ZSTD_decompressSequencesLong_body( litPtr = dctx->litExtraBuffer; litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; dctx->litBufferLocation = ZSTD_not_in_dst; - oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) - assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); + assert(!ZSTD_isError(oneSeqSize)); + ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); #endif - if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; - prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); - sequences[seqNb & STORED_SEQS_MASK] = sequence; - op += oneSeqSize; - } + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & STORED_SEQS_MASK] = sequence; + op += oneSeqSize; + } } else { /* lit buffer is either wholly contained in first or second split, or not split at all*/ - oneSeqSize = dctx->litBufferLocation == ZSTD_split ? + size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ? 
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); + ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); #endif if (ZSTD_isError(oneSeqSize)) return oneSeqSize; @@ -1786,17 +1920,16 @@ ZSTD_decompressSequencesLong_body( op += oneSeqSize; } } - RETURN_ERROR_IF(seqNblitBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) - { - const size_t leftoverLit = dctx->litBufferEnd - litPtr; - if (leftoverLit) - { + if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) { + const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr); + assert(dctx->litBufferEnd >= litPtr); + if (leftoverLit) { RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); sequence->litLength -= leftoverLit; @@ -1805,11 +1938,10 @@ ZSTD_decompressSequencesLong_body( litPtr = dctx->litExtraBuffer; litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; dctx->litBufferLocation = ZSTD_not_in_dst; - { - size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + { size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); + ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); #endif if (ZSTD_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; @@ -1822,7 +1954,7 @@ ZSTD_decompressSequencesLong_body( ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); + ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); #endif if (ZSTD_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; @@ -1834,9 +1966,9 @@ ZSTD_decompressSequencesLong_body( } /* last literal segment */ - if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */ - { - size_t const lastLLSize = litBufferEnd - litPtr; + if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */ + size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); + assert(litBufferEnd >= litPtr); RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); if (op != NULL) { 
ZSTD_memmove(op, litPtr, lastLLSize); @@ -1845,7 +1977,8 @@ ZSTD_decompressSequencesLong_body( litPtr = dctx->litExtraBuffer; litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; } - { size_t const lastLLSize = litBufferEnd - litPtr; + { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); + assert(litBufferEnd >= litPtr); RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { ZSTD_memmove(op, litPtr, lastLLSize); @@ -1853,17 +1986,16 @@ ZSTD_decompressSequencesLong_body( } } - return op-ostart; + return (size_t)(op - ostart); } static size_t ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ @@ -1877,20 +2009,18 @@ DONT_VECTORIZE ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } static BMI2_TARGET_ATTRIBUTE size_t DONT_VECTORIZE ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ @@ -1899,50 +2029,40 @@ static BMI2_TARGET_ATTRIBUTE size_t ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { - return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ #endif /* DYNAMIC_BMI2 */ -typedef size_t (*ZSTD_decompressSequences_t)( - ZSTD_DCtx* dctx, - void* dst, size_t maxDstSize, - const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame); - #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { DEBUGLOG(5, "ZSTD_decompressSequences"); #if DYNAMIC_BMI2 if (ZSTD_DCtx_get_bmi2(dctx)) { - return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, 
isLongOffset); } #endif - return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } static size_t ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer"); #if DYNAMIC_BMI2 if (ZSTD_DCtx_get_bmi2(dctx)) { - return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif - return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ @@ -1957,65 +2077,110 @@ static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame) + const ZSTD_longOffset_e isLongOffset) { DEBUGLOG(5, "ZSTD_decompressSequencesLong"); #if DYNAMIC_BMI2 if (ZSTD_DCtx_get_bmi2(dctx)) { - return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif - return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ +/** + * @returns The total size of the history referenceable by zstd, including + * both the prefix and the extDict. At @p op any offset larger than this + * is invalid. + */ +static size_t ZSTD_totalHistorySize(void* curPtr, const void* virtualStart) +{ + return (size_t)((char*)curPtr - (const char*)virtualStart); +} -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) -/* ZSTD_getLongOffsetsShare() : +typedef struct { + unsigned longOffsetShare; + unsigned maxNbAdditionalBits; +} ZSTD_OffsetInfo; + +/* ZSTD_getOffsetInfo() : * condition : offTable must be valid * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) - * compared to maximum possible of (1< 22) total += 1; + ZSTD_OffsetInfo info = {0, 0}; + /* If nbSeq == 0, then the offTable is uninitialized, but we have + * no sequences, so both values should be 0. 
+ */ + if (nbSeq != 0) { + const void* ptr = offTable; + U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog; + const ZSTD_seqSymbol* table = offTable + 1; + U32 const max = 1 << tableLog; + U32 u; + DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog); + + assert(max <= (1 << OffFSELog)); /* max not too large */ + for (u=0; u<max; u++) { + info.maxNbAdditionalBits = MAX(info.maxNbAdditionalBits, table[u].nbAdditionalBits); + if (table[u].nbAdditionalBits > 22) info.longOffsetShare += 1; + } + + assert(tableLog <= OffFSELog); + info.longOffsetShare <<= (OffFSELog - tableLog); /* scale to OffFSELog */ } - assert(tableLog <= OffFSELog); - total <<= (OffFSELog - tableLog); /* scale to OffFSELog */ + return info; +} - return total; +/** + * @returns The maximum offset we can decode in one read of our bitstream, without + * reloading more bits in the middle of the offset bits read. Any offsets larger + * than this must use the long offset decoder. + */ +static size_t ZSTD_maxShortOffset(void) +{ + if (MEM_64bits()) { + /* We can decode any offset without reloading bits. + * This might change if the max window size grows. + */ + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); + return (size_t)-1; + } else { + /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1. + * This offBase would require STREAM_ACCUMULATOR_MIN extra bits. + * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset. + */ + size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1; + size_t const maxOffset = maxOffbase - ZSTD_REP_NUM; + assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN); + return maxOffset; + } } -#endif size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const int frame, const streaming_operation streaming) + const void* src, size_t srcSize, const streaming_operation streaming) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; - /* isLongOffset must be true if there are long offsets. - * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. - * We don't expect that to be the case in 64-bit mode. - * In block mode, window size is not known, so we have to be conservative. - * (note: but it could be evaluated from current-lowLimit) - */ - ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)))); - DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); - - RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); + DEBUGLOG(5, "ZSTD_decompressBlock_internal (cSize : %u)", (unsigned)srcSize); + + /* Note : the wording of the specification + * allows compressed block to be sized exactly ZSTD_blockSizeMax(dctx). + * This generally does not happen, as it makes little sense, + * since an uncompressed block would feature same size and have no decompression cost. + * Also, note that decoder from reference libzstd before < v1.5.4 + * would consider this edge case as an error. + * As a consequence, avoid generating compressed blocks of size ZSTD_blockSizeMax(dctx) + * for broader compatibility with the deployed ecosystem of zstd decoders */ + RETURN_ERROR_IF(srcSize > ZSTD_blockSizeMax(dctx), srcSize_wrong, ""); /* Decode literals section */ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); @@ -2027,6 +2192,23 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, /* Build Decoding Tables */ { + /* Compute the maximum block size, which must also work when !frame and fParams are unset. 
+ /* Compute the maximum block size, which must also work when !frame and fParams are unset. + * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. + */ + size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx)); + size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)blockSizeMax), (BYTE const*)dctx->virtualStart); + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than ZSTD_maxShortOffset(). + * We don't expect that to be the case in 64-bit mode. + * + * We check here to see if our history is large enough to allow long offsets. + * If it isn't, then we can't possibly have (valid) long offsets. If the offset + * is invalid, then it is okay to read it incorrectly. + * + * If isLongOffset is true, then we will later check our decoding table to see + * if it is even possible to generate long offsets. + */ + ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset())); /* These macros control at build-time which decompressor implementation * we use. If neither is defined, we do some inspection and dispatch at * runtime. @@ -2034,6 +2216,11 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) int usePrefetchDecoder = dctx->ddictIsCold; +#else + /* Set to 1 to avoid computing offset info if we don't need to. + * Otherwise this value is ignored. + */ + int usePrefetchDecoder = 1; #endif int nbSeq; size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); @@ -2041,40 +2228,55 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, ip += seqHSize; srcSize -= seqHSize; - RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF((dst == NULL || dstCapacity == 0) && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(MEM_64bits() && sizeof(size_t) == sizeof(void*) && (size_t)(-1) - (size_t)dst < (size_t)(1 << 20), dstSize_tooSmall, "invalid dst"); -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if ( !usePrefetchDecoder - && (!frame || (dctx->fParams.windowSize > (1<<24))) - && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */ - U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr); - U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ - usePrefetchDecoder = (shareLongOffsets >= minShare); + /* If we could potentially have long offsets, or we might want to use the prefetch decoder, + * compute information about the share of long offsets, and the maximum nbAdditionalBits. + * NOTE: could probably use a larger nbSeq limit + */ + if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) { + ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); + if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) { + /* If isLongOffset, but the maximum number of additional bits that we see in our table is small + * enough, then we know it is impossible to have too long an offset in this block, so we can + * use the regular offset decoder. + */ + isLongOffset = ZSTD_lo_isRegularOffset; + } + if (!usePrefetchDecoder) { + U32 const minShare = MEM_64bits() ?
7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ + usePrefetchDecoder = (info.longOffsetShare >= minShare); + } } -#endif dctx->ddictIsCold = 0; #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if (usePrefetchDecoder) + if (usePrefetchDecoder) { +#else + (void)usePrefetchDecoder; + { #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT - return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); #endif + } #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG /* else */ if (dctx->litBufferLocation == ZSTD_split) - return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); else - return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); #endif } } +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) { if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */ @@ -2086,13 +2288,24 @@ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) } -size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) { size_t dSize; + dctx->isFrameDecompression = 0; ZSTD_checkContinuity(dctx, dst, dstCapacity); - dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming); + FORWARD_IF_ERROR(dSize, ""); dctx->previousDstEnd = (char*)dst + dSize; return dSize; } + + +/* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize); +} diff --git a/lib/decompress/zstd_decompress_block.h b/lib/decompress/zstd_decompress_block.h index c61a9d0c4b3..ab152404ba0 100644 --- a/lib/decompress/zstd_decompress_block.h +++ b/lib/decompress/zstd_decompress_block.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -47,7 +47,7 @@ typedef enum { */ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const int frame, const streaming_operation streaming); + const void* src, size_t srcSize, const streaming_operation streaming); /* ZSTD_buildFSETable() : * generate FSE decoding table for one symbol (ll, ml or off) @@ -64,5 +64,10 @@ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, unsigned tableLog, void* wksp, size_t wkspSize, int bmi2); +/* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. 
*/ +size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + #endif /* ZSTD_DEC_BLOCK_H */ diff --git a/lib/decompress/zstd_decompress_internal.h b/lib/decompress/zstd_decompress_internal.h index 91e9dceb5d3..e4bffdbc6cf 100644 --- a/lib/decompress/zstd_decompress_internal.h +++ b/lib/decompress/zstd_decompress_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -136,7 +136,7 @@ struct ZSTD_DCtx_s const void* virtualStart; /* virtual start of previous segment if it was just before current one */ const void* dictEnd; /* end of previous segment */ size_t expected; - ZSTD_frameHeader fParams; + ZSTD_FrameHeader fParams; U64 processedCSize; U64 decodedSize; blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */ @@ -153,7 +153,8 @@ struct ZSTD_DCtx_s size_t litSize; size_t rleSize; size_t staticSize; -#if DYNAMIC_BMI2 != 0 + int isFrameDecompression; +#if DYNAMIC_BMI2 int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */ #endif @@ -165,6 +166,8 @@ struct ZSTD_DCtx_s ZSTD_dictUses_e dictUses; ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */ ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */ + int disableHufAsm; + int maxBlockSizeParam; /* streaming */ ZSTD_dStreamStage streamStage; @@ -208,11 +211,11 @@ struct ZSTD_DCtx_s }; /* typedef'd to ZSTD_DCtx within "zstd.h" */ MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) { -#if DYNAMIC_BMI2 != 0 - return dctx->bmi2; +#if DYNAMIC_BMI2 + return dctx->bmi2; #else (void)dctx; - return 0; + return 0; #endif } diff --git a/lib/deprecated/zbuff.h b/lib/deprecated/zbuff.h index b83ea0fed58..a968245b36a 100644 --- a/lib/deprecated/zbuff.h +++ b/lib/deprecated/zbuff.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/deprecated/zbuff_common.c b/lib/deprecated/zbuff_common.c index e7d01a08180..5a2f2db354f 100644 --- a/lib/deprecated/zbuff_common.c +++ b/lib/deprecated/zbuff_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/deprecated/zbuff_compress.c b/lib/deprecated/zbuff_compress.c index 51cf158c4ad..1d8682150b2 100644 --- a/lib/deprecated/zbuff_compress.c +++ b/lib/deprecated/zbuff_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/deprecated/zbuff_decompress.c b/lib/deprecated/zbuff_decompress.c index d73c0f35fac..12a66af7412 100644 --- a/lib/deprecated/zbuff_decompress.c +++ b/lib/deprecated/zbuff_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
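The cover.c hunks below replace comparators that relied on a global g_coverCtx with reentrant qsort_r()/qsort_s() variants, selected by the ZDICT_QSORT macros; the point is that the sort context travels as an explicit argument. As a minimal illustration of that idea, written against the GNU glibc qsort_r() convention only (Apple and MSVC flip the parameter order, which the diff handles):

    #define _GNU_SOURCE   /* glibc: expose qsort_r() */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { const int* keys; } SortCtx;

    /* the comparator receives the context directly: no global needed */
    static int cmpByKey(const void* lp, const void* rp, void* opaque)
    {
        const SortCtx* ctx = (const SortCtx*)opaque;
        int const l = ctx->keys[*(const int*)lp];
        int const r = ctx->keys[*(const int*)rp];
        return (l > r) - (l < r);
    }

    int main(void)
    {
        int keys[] = { 30, 10, 20 };
        int idx[]  = { 0, 1, 2 };
        SortCtx ctx = { keys };
        qsort_r(idx, 3, sizeof(int), cmpByKey, &ctx);   /* GNU parameter order */
        printf("%d %d %d\n", idx[0], idx[1], idx[2]);   /* 1 2 0 */
        return 0;
    }

The C90 fallback in the diff keeps the global precisely because plain qsort() offers no such opaque pointer.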
* * This source code is licensed under both the BSD-style license (found in the @@ -13,6 +13,8 @@ /* ************************************* * Dependencies ***************************************/ +#define ZSTD_DISABLE_DEPRECATE_WARNINGS /* suppress warning on ZSTD_initDStream_usingDict */ +#include "../zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" diff --git a/lib/dictBuilder/cover.c b/lib/dictBuilder/cover.c index 724675d304b..06d1cb93a5c 100644 --- a/lib/dictBuilder/cover.c +++ b/lib/dictBuilder/cover.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -21,8 +21,21 @@ /*-************************************* * Dependencies ***************************************/ +/* qsort_r is an extension. + * + * Android NDK does not ship qsort_r(). + */ +#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__CYGWIN__) || defined(__MSYS__) +# ifndef _GNU_SOURCE +# define _GNU_SOURCE +# endif +#endif + +#define __STDC_WANT_LIB_EXT1__ 1 /* request C11 Annex K, which includes qsort_s() */ + #include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ +#include <stdlib.h> /* malloc, free, qsort_r */ + #include <string.h> /* memset */ #include <time.h> /* clock */ @@ -30,9 +43,10 @@ # define ZDICT_STATIC_LINKING_ONLY #endif +#include "../common/debug.h" /* DEBUG_STATIC_ASSERT */ #include "../common/mem.h" /* read */ -#include "../common/pool.h" -#include "../common/threading.h" +#include "../common/pool.h" /* POOL_ctx */ +#include "../common/threading.h" /* ZSTD_pthread_mutex_t */ #include "../common/zstd_internal.h" /* includes zstd.h */ #include "../common/bits.h" /* ZSTD_highbit32 */ #include "../zdict.h" @@ -51,40 +65,58 @@ #define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) #define COVER_DEFAULT_SPLITPOINT 1.0 +/** + * Select the qsort() variant used by cover + */ +#define ZDICT_QSORT_MIN 0 +#define ZDICT_QSORT_C90 ZDICT_QSORT_MIN +#define ZDICT_QSORT_GNU 1 +#define ZDICT_QSORT_APPLE 2 +#define ZDICT_QSORT_MSVC 3 +#define ZDICT_QSORT_C11 ZDICT_QSORT_MAX +#define ZDICT_QSORT_MAX 4 + +#ifndef ZDICT_QSORT +# if defined(__APPLE__) +# define ZDICT_QSORT ZDICT_QSORT_APPLE /* uses qsort_r() with a different order for parameters */ +# elif (defined(__linux__) && !defined(__ANDROID__)) || defined(__CYGWIN__) || defined(__MSYS__) +# define ZDICT_QSORT ZDICT_QSORT_GNU /* uses qsort_r() */ +# elif defined(_WIN32) && defined(_MSC_VER) +# define ZDICT_QSORT ZDICT_QSORT_MSVC /* uses qsort_s() with a different order for parameters */ +# elif defined(__STDC_LIB_EXT1__) && (__STDC_LIB_EXT1__ > 0) /* C11 Annex K */ +# define ZDICT_QSORT ZDICT_QSORT_C11 /* uses qsort_s() */ +# else +# define ZDICT_QSORT ZDICT_QSORT_C90 /* uses standard qsort() which is not re-entrant (requires global variable) */ +# endif +#endif + + /*-************************************* * Console display +* +* Captures the `displayLevel` variable in the local scope. ***************************************/ -#ifndef LOCALDISPLAYLEVEL -static int g_displayLevel = 0; -#endif #undef DISPLAY #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } -#undef LOCALDISPLAYLEVEL -#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ +#undef DISPLAYLEVEL +#define DISPLAYLEVEL(l, ...)
\ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ -#undef DISPLAYLEVEL -#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) -#ifndef LOCALDISPLAYUPDATE -static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100; -static clock_t g_time = 0; -#endif -#undef LOCALDISPLAYUPDATE -#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ +#undef DISPLAYUPDATE +#define DISPLAYUPDATE(lastUpdateTime, l, ...) \ if (displayLevel >= l) { \ - if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \ - g_time = clock(); \ + const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; \ + if ((clock() - lastUpdateTime > refreshRate) || (displayLevel >= 4)) { \ + lastUpdateTime = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } -#undef DISPLAYUPDATE -#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) /*-************************************* * Hash table @@ -179,7 +211,7 @@ static U32 *COVER_map_at(COVER_map_t *map, U32 key) { */ static void COVER_map_remove(COVER_map_t *map, U32 key) { U32 i = COVER_map_index(map, key); - COVER_map_pair_t *del = &map->data[i]; + COVER_map_pair_t* del = &map->data[i]; U32 shift = 1; if (del->value == MAP_EMPTY_VALUE) { return; } @@ -230,10 +262,13 @@ typedef struct { U32 *freqs; U32 *dmerAt; unsigned d; + int displayLevel; } COVER_ctx_t; -/* We need a global context for qsort... */ +#if ZDICT_QSORT == ZDICT_QSORT_C90 +/* Use global context for non-reentrant sort functions */ static COVER_ctx_t *g_coverCtx = NULL; +#endif /*-************************************* * Helper functions ***************************************/ @@ -276,11 +311,15 @@ static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) { /** * Same as COVER_cmp() except ties are broken by pointer value - * NOTE: g_coverCtx must be set to call this function. A global is required because - * qsort doesn't take an opaque pointer. */ -static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) { - int result = COVER_cmp(g_coverCtx, lp, rp); +#if (ZDICT_QSORT == ZDICT_QSORT_MSVC) || (ZDICT_QSORT == ZDICT_QSORT_APPLE) +static int WIN_CDECL COVER_strict_cmp(void* g_coverCtx, const void* lp, const void* rp) { +#elif (ZDICT_QSORT == ZDICT_QSORT_GNU) || (ZDICT_QSORT == ZDICT_QSORT_C11) +static int COVER_strict_cmp(const void *lp, const void *rp, void *g_coverCtx) { +#else /* C90 fallback.*/ +static int COVER_strict_cmp(const void *lp, const void *rp) { +#endif + int result = COVER_cmp((COVER_ctx_t*)g_coverCtx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } @@ -289,21 +328,60 @@ static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) { /** * Faster version for d <= 8. */ -static int WIN_CDECL COVER_strict_cmp8(const void *lp, const void *rp) { - int result = COVER_cmp8(g_coverCtx, lp, rp); +#if (ZDICT_QSORT == ZDICT_QSORT_MSVC) || (ZDICT_QSORT == ZDICT_QSORT_APPLE) +static int WIN_CDECL COVER_strict_cmp8(void* g_coverCtx, const void* lp, const void* rp) { +#elif (ZDICT_QSORT == ZDICT_QSORT_GNU) || (ZDICT_QSORT == ZDICT_QSORT_C11) +static int COVER_strict_cmp8(const void *lp, const void *rp, void *g_coverCtx) { +#else /* C90 fallback.*/ +static int COVER_strict_cmp8(const void *lp, const void *rp) { +#endif + int result = COVER_cmp8((COVER_ctx_t*)g_coverCtx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } return result; } +/** + * Abstract away divergence of qsort_r() parameters. + * Hopefully when C11 becomes the norm, we will be able + * to clean it up.
+ */ +static void stableSort(COVER_ctx_t *ctx) +{ + DEBUG_STATIC_ASSERT(ZDICT_QSORT_MIN <= ZDICT_QSORT && ZDICT_QSORT <= ZDICT_QSORT_MAX); +#if (ZDICT_QSORT == ZDICT_QSORT_APPLE) + qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32), + ctx, + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); +#elif (ZDICT_QSORT == ZDICT_QSORT_GNU) + qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp), + ctx); +#elif (ZDICT_QSORT == ZDICT_QSORT_MSVC) + qsort_s(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp), + ctx); +#elif (ZDICT_QSORT == ZDICT_QSORT_C11) + qsort_s(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp), + ctx); +#else /* C90 fallback.*/ + g_coverCtx = ctx; + /* TODO(cavalcanti): implement a reentrant qsort() when _r is not available. */ + qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); +#endif +} + /** * Returns the first pointer in [first, last) whose element does not compare * less than value. If no such element exists it returns last. */ -static const size_t *COVER_lower_bound(const size_t *first, const size_t *last, +static const size_t *COVER_lower_bound(const size_t* first, const size_t* last, size_t value) { - size_t count = last - first; + size_t count = (size_t)(last - first); + assert(last >= first); while (count != 0) { size_t step = count / 2; const size_t *ptr = first; @@ -549,7 +627,8 @@ static void COVER_ctx_destroy(COVER_ctx_t *ctx) { */ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, - unsigned d, double splitPoint) { + unsigned d, double splitPoint, int displayLevel) +{ const BYTE *const samples = (const BYTE *)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); /* Split samples into testing and training sets */ @@ -557,6 +636,7 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; + ctx->displayLevel = displayLevel; /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { @@ -618,17 +698,7 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, for (i = 0; i < ctx->suffixSize; ++i) { ctx->suffix[i] = i; } - /* qsort doesn't take an opaque pointer, so pass as a global. - * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is. - */ - g_coverCtx = ctx; -#if defined(__OpenBSD__) - mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32), - (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); -#else - qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), - (ctx->d <= 8 ? 
&COVER_strict_cmp8 : &COVER_strict_cmp)); -#endif + stableSort(ctx); } DISPLAYLEVEL(2, "Computing frequencies\n"); /* For each dmer group (group of positions with the same first d bytes): @@ -647,18 +717,18 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel) { - const double ratio = (double)nbDmers / maxDictSize; + const double ratio = (double)nbDmers / (double)maxDictSize; if (ratio >= 10) { return; } - LOCALDISPLAYLEVEL(displayLevel, 1, - "WARNING: The maximum dictionary size %u is too large " - "compared to the source size %u! " - "size(source)/size(dictionary) = %f, but it should be >= " - "10! This may lead to a subpar dictionary! We recommend " - "training on sources at least 10x, and preferably 100x " - "the size of the dictionary! \n", (U32)maxDictSize, - (U32)nbDmers, ratio); + DISPLAYLEVEL(1, + "WARNING: The maximum dictionary size %u is too large " + "compared to the source size %u! " + "size(source)/size(dictionary) = %f, but it should be >= " + "10! This may lead to a subpar dictionary! We recommend " + "training on sources at least 10x, and preferably 100x " + "the size of the dictionary! \n", (U32)maxDictSize, + (U32)nbDmers, ratio); } COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, @@ -693,6 +763,8 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3)); size_t zeroScoreRun = 0; size_t epoch; + clock_t lastUpdateTime = 0; + const int displayLevel = ctx->displayLevel; DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", (U32)epochs.num, (U32)epochs.size); /* Loop through the epochs until there are no more segments or the dictionary @@ -726,6 +798,7 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( + lastUpdateTime, 2, "\r%u%% ", (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } @@ -733,7 +806,7 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, return tail; } -ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters) @@ -741,9 +814,8 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( BYTE* const dict = (BYTE*)dictBuffer; COVER_ctx_t ctx; COVER_map_t activeDmers; + const int displayLevel = (int)parameters.zParams.notificationLevel; parameters.splitPoint = 1.0; - /* Initialize global data */ - g_displayLevel = (int)parameters.zParams.notificationLevel; /* Checks */ if (!COVER_checkParameters(parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); @@ -761,12 +833,12 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( /* Initialize context and activeDmers */ { size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, - parameters.d, parameters.splitPoint); + parameters.d, parameters.splitPoint, displayLevel); if (ZSTD_isError(initVal)) { return initVal; } } - COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel); + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel); if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); 
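For orientation, ZDICT_trainFromBuffer_cover() above is the single-shot trainer (no parameter search). A hedged usage sketch; the k/d values and buffer names are illustrative only, and ZDICT_STATIC_LINKING_ONLY is required since this API is not part of the stable surface:

    #define ZDICT_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <string.h>
    #include "zdict.h"

    /* Illustrative only: real training wants sources 10-100x the dictionary size. */
    static int trainExample(void* dictBuffer, size_t dictCapacity,
                            const void* samples, const size_t* sampleSizes,
                            unsigned nbSamples)
    {
        ZDICT_cover_params_t params;
        memset(&params, 0, sizeof(params));
        params.k = 200;   /* segment size; must satisfy k >= d */
        params.d = 8;     /* dmer size */
        params.zParams.notificationLevel = 2;   /* feeds displayLevel above */
        {   size_t const dictSize = ZDICT_trainFromBuffer_cover(
                    dictBuffer, dictCapacity, samples, sampleSizes, nbSamples, params);
            if (ZDICT_isError(dictSize)) {
                fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
                return 1;
            }
            printf("dictionary: %u bytes\n", (unsigned)dictSize);
        }
        return 0;
    }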
COVER_ctx_destroy(&ctx); @@ -907,8 +979,10 @@ void COVER_best_start(COVER_best_t *best) { * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. */ -void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, - COVER_dictSelection_t selection) { +void COVER_best_finish(COVER_best_t* best, + ZDICT_cover_params_t parameters, + COVER_dictSelection_t selection) +{ void* dict = selection.dictContent; size_t compressedSize = selection.totalCompressedSize; size_t dictSize = selection.dictSize; @@ -951,9 +1025,17 @@ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, } } +static COVER_dictSelection_t setDictSelection(BYTE* buf, size_t s, size_t csz) +{ + COVER_dictSelection_t ds; + ds.dictContent = buf; + ds.dictSize = s; + ds.totalCompressedSize = csz; + return ds; +} + COVER_dictSelection_t COVER_dictSelectionError(size_t error) { - COVER_dictSelection_t selection = { NULL, 0, error }; - return selection; + return setDictSelection(NULL, 0, error); } unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) { @@ -972,8 +1054,8 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBuffe size_t largestCompressed = 0; BYTE* customDictContentEnd = customDictContent + dictContentSize; - BYTE * largestDictbuffer = (BYTE *)malloc(dictBufferCapacity); - BYTE * candidateDictBuffer = (BYTE *)malloc(dictBufferCapacity); + BYTE* largestDictbuffer = (BYTE*)malloc(dictBufferCapacity); + BYTE* candidateDictBuffer = (BYTE*)malloc(dictBufferCapacity); double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00; if (!largestDictbuffer || !candidateDictBuffer) { @@ -1006,9 +1088,8 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBuffe } if (params.shrinkDict == 0) { - COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; free(candidateDictBuffer); - return selection; + return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); } largestDict = dictContentSize; @@ -1040,20 +1121,16 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBuffe return COVER_dictSelectionError(totalCompressedSize); } - if (totalCompressedSize <= largestCompressed * regressionTolerance) { - COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize }; + if ((double)totalCompressedSize <= (double)largestCompressed * regressionTolerance) { free(largestDictbuffer); - return selection; + return setDictSelection( candidateDictBuffer, dictContentSize, totalCompressedSize ); } dictContentSize *= 2; } dictContentSize = largestDict; totalCompressedSize = largestCompressed; - { - COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; - free(candidateDictBuffer); - return selection; - } + free(candidateDictBuffer); + return setDictSelection( largestDictbuffer, dictContentSize, totalCompressedSize ); } /** @@ -1084,6 +1161,7 @@ static void COVER_tryParameters(void *opaque) BYTE* const dict = (BYTE*)malloc(dictBufferCapacity); COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32)); + const int displayLevel = ctx->displayLevel; if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); goto _cleanup; @@ -1116,7 +1194,7 @@ 
static void COVER_tryParameters(void *opaque) free(freqs); } -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_cover_params_t* parameters) @@ -1135,21 +1213,22 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); const unsigned shrinkDict = 0; /* Local variables */ - const int displayLevel = parameters->zParams.notificationLevel; + int displayLevel = (int)parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; COVER_best_t best; POOL_ctx *pool = NULL; int warned = 0; + clock_t lastUpdateTime = 0; /* Checks */ if (splitPoint <= 0 || splitPoint > 1) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); + DISPLAYLEVEL(1, "Incorrect parameters\n"); return ERROR(parameter_outOfBound); } if (kMinK < kMaxD || kMaxK < kMinK) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); + DISPLAYLEVEL(1, "Incorrect parameters\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { @@ -1169,19 +1248,19 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( } /* Initialization */ COVER_best_init(&best); - /* Turn down global display level to clean up display at level 2 and below */ - g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ - LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", + DISPLAYLEVEL(2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ COVER_ctx_t ctx; - LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); + DISPLAYLEVEL(3, "d=%u\n", d); { - const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint); + /* Turn down global display level to clean up display at level 2 and below */ + const int childDisplayLevel = (displayLevel == 0) ? 
0 : displayLevel - 1; + const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, childDisplayLevel); if (ZSTD_isError(initVal)) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); + DISPLAYLEVEL(1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return initVal; @@ -1196,9 +1275,9 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( /* Prepare the arguments */ COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc( sizeof(COVER_tryParameters_data_t)); - LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); + DISPLAYLEVEL(3, "k=%u\n", k); if (!data) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); + DISPLAYLEVEL(1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); COVER_ctx_destroy(&ctx); POOL_free(pool); @@ -1213,7 +1292,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; data->parameters.shrinkDict = shrinkDict; - data->parameters.zParams.notificationLevel = g_displayLevel; + data->parameters.zParams.notificationLevel = (unsigned)ctx.displayLevel; /* Check the parameters */ if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); @@ -1228,14 +1307,14 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( COVER_tryParameters(data); } /* Print status */ - LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", - (unsigned)((iteration * 100) / kIterations)); + DISPLAYUPDATE(lastUpdateTime, 2, "\r%u%% ", + (unsigned)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); COVER_ctx_destroy(&ctx); } - LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); + DISPLAYLEVEL(2, "\r%79s\r", ""); /* Fill the output buffer and parameters with output of the best parameters */ { const size_t dictSize = best.dictSize; diff --git a/lib/dictBuilder/cover.h b/lib/dictBuilder/cover.h index 1aacdddd6fe..a5d7506ef6d 100644 --- a/lib/dictBuilder/cover.h +++ b/lib/dictBuilder/cover.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the @@ -12,14 +12,8 @@ # define ZDICT_STATIC_LINKING_ONLY #endif -#include <stdio.h> /* fprintf */ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* memset */ -#include <time.h> /* clock */ -#include "../common/mem.h" /* read */ -#include "../common/pool.h" -#include "../common/threading.h" -#include "../common/zstd_internal.h" /* includes zstd.h */ +#include "../common/threading.h" /* ZSTD_pthread_mutex_t */ +#include "../common/mem.h" /* U32, BYTE */ #include "../zdict.h" /** diff --git a/lib/dictBuilder/divsufsort.c b/lib/dictBuilder/divsufsort.c index a2870fb3ba3..b59781f0129 100644 --- a/lib/dictBuilder/divsufsort.c +++ b/lib/dictBuilder/divsufsort.c @@ -25,23 +25,22 @@ */ /*- Compiler specifics -*/ -#ifdef __clang__ -#pragma clang diagnostic ignored "-Wshorten-64-to-32" -#endif - #if defined(_MSC_VER) -# pragma warning(disable : 4244) # pragma warning(disable : 4127) /* C4127 : Condition expression is constant */ #endif /*- Dependencies -*/ #include <assert.h> +#include <limits.h> +#include <stddef.h> #include <stdio.h> #include <stdlib.h> #include "divsufsort.h" +#define PTRDIFF_TO_INT(x) (assert((x) <= INT_MAX && (x) >= INT_MIN), (int)(x)) /*- Constants -*/ #if defined(INLINE) # undef INLINE #endif @@ -199,8 +198,8 @@ ss_isqrt(int x) { int y, e; if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; } - e = (x & 0xffff0000) ? - ((x & 0xff000000) ? + e = ((unsigned)x & 0xffff0000) ? + (((unsigned)x & 0xff000000) ? 24 + lg_table[(x >> 24) & 0xff] : 16 + lg_table[(x >> 16) & 0xff]) : ((x & 0x0000ff00) ? @@ -354,7 +353,7 @@ ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) { int *middle; int t; - t = last - first; + t = PTRDIFF_TO_INT(last - first); middle = first + t / 2; if(t <= 512) {
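All the PTRDIFF_TO_INT() call sites threaded through the hunks above and below follow the single pattern defined near the top of divsufsort.c: assert that the pointer difference fits in an int, then narrow explicitly, instead of the blanket -Wshorten-64-to-32 / C4244 warning suppressions that were removed. Restated as a standalone sketch (the macro is copied from the diff; spanLength() is a hypothetical helper):

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    #define PTRDIFF_TO_INT(x) (assert((x) <= INT_MAX && (x) >= INT_MIN), (int)(x))

    /* divsufsort indexes with int; on LP64 a ptrdiff_t can exceed INT_MAX,
     * so the narrowing is checked in debug builds rather than silent. */
    static int spanLength(const int* first, const int* last)
    {
        ptrdiff_t const d = last - first;
        return PTRDIFF_TO_INT(d);
    }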
+490,38 @@ ss_mintrosort(const unsigned char *T, const int *PA, if((a - first) <= (last - c)) { if((last - c) <= (c - b)) { - STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + STACK_PUSH(b, c, depth + 1, ss_ilg(PTRDIFF_TO_INT(c - b))); STACK_PUSH(c, last, depth, limit); last = a; } else if((a - first) <= (c - b)) { STACK_PUSH(c, last, depth, limit); - STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + STACK_PUSH(b, c, depth + 1, ss_ilg(PTRDIFF_TO_INT(c - b))); last = a; } else { STACK_PUSH(c, last, depth, limit); STACK_PUSH(first, a, depth, limit); - first = b, last = c, depth += 1, limit = ss_ilg(c - b); + first = b, last = c, depth += 1, limit = ss_ilg(PTRDIFF_TO_INT(c - b)); } } else { if((a - first) <= (c - b)) { - STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + STACK_PUSH(b, c, depth + 1, ss_ilg(PTRDIFF_TO_INT(c - b))); STACK_PUSH(first, a, depth, limit); first = c; } else if((last - c) <= (c - b)) { STACK_PUSH(first, a, depth, limit); - STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + STACK_PUSH(b, c, depth + 1, ss_ilg(PTRDIFF_TO_INT(c - b))); first = c; } else { STACK_PUSH(first, a, depth, limit); STACK_PUSH(c, last, depth, limit); - first = b, last = c, depth += 1, limit = ss_ilg(c - b); + first = b, last = c, depth += 1, limit = ss_ilg(PTRDIFF_TO_INT(c - b)); } } } else { limit += 1; if(Td[PA[*first] - 1] < v) { first = ss_partition(PA, first, last, depth); - limit = ss_ilg(last - first); + limit = ss_ilg(PTRDIFF_TO_INT(last - first)); } depth += 1; } @@ -551,7 +550,8 @@ void ss_rotate(int *first, int *middle, int *last) { int *a, *b, t; int l, r; - l = middle - first, r = last - middle; + l = PTRDIFF_TO_INT(middle - first); + r = PTRDIFF_TO_INT(last - middle); for(; (0 < l) && (0 < r);) { if(l == r) { ss_blockswap(first, middle, l); break; } if(l < r) { @@ -601,7 +601,7 @@ ss_inplacemerge(const unsigned char *T, const int *PA, for(;;) { if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); } else { x = 0; p = PA + *(last - 1); } - for(a = first, len = middle - first, half = len >> 1, r = -1; + for(a = first, len = PTRDIFF_TO_INT(middle - first), half = len >> 1, r = -1; 0 < len; len = half, half >>= 1) { b = a + half; @@ -640,7 +640,7 @@ ss_mergeforward(const unsigned char *T, const int *PA, int r; bufend = buf + (middle - first) - 1; - ss_blockswap(buf, first, middle - first); + ss_blockswap(buf, first, PTRDIFF_TO_INT(middle - first)); for(t = *(a = first), b = buf, c = middle;;) { r = ss_compare(T, PA + *b, PA + *c, depth); @@ -692,7 +692,7 @@ ss_mergebackward(const unsigned char *T, const int *PA, int x; bufend = buf + (last - middle) - 1; - ss_blockswap(buf, middle, last - middle); + ss_blockswap(buf, middle, PTRDIFF_TO_INT(last - middle)); x = 0; if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; } @@ -781,7 +781,7 @@ ss_swapmerge(const unsigned char *T, const int *PA, continue; } - for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1; + for(m = 0, len = PTRDIFF_TO_INT(MIN(middle - first, last - middle)), half = len >> 1; 0 < len; len = half, half >>= 1) { if(ss_compare(T, PA + GETIDX(*(middle + m + half)), @@ -850,8 +850,8 @@ sssort(const unsigned char *T, const int *PA, ss_mintrosort(T, PA, first, last, depth); #else if((bufsize < SS_BLOCKSIZE) && - (bufsize < (last - first)) && - (bufsize < (limit = ss_isqrt(last - first)))) { + (bufsize < PTRDIFF_TO_INT(last - first)) && + (bufsize < (limit = ss_isqrt(PTRDIFF_TO_INT(last - first))))) { if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; } buf = middle = last - limit, bufsize = limit; } else { @@ -863,7 +863,7 @@ sssort(const 
unsigned char *T, const int *PA, #elif 1 < SS_BLOCKSIZE ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth); #endif - curbufsize = last - (a + SS_BLOCKSIZE); + curbufsize = PTRDIFF_TO_INT(last - (a + SS_BLOCKSIZE)); curbuf = a + SS_BLOCKSIZE; if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; } for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) { @@ -1016,7 +1016,7 @@ tr_pivot(const int *ISAd, int *first, int *last) { int *middle; int t; - t = last - first; + t = PTRDIFF_TO_INT(last - first); middle = first + t / 2; if(t <= 512) { @@ -1098,9 +1098,9 @@ tr_partition(const int *ISAd, if(a <= d) { c = b - 1; - if((s = a - first) > (t = b - a)) { s = t; } + if((s = PTRDIFF_TO_INT(a - first)) > (t = PTRDIFF_TO_INT(b - a))) { s = t; } for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } - if((s = d - c) > (t = last - d - 1)) { s = t; } + if((s = PTRDIFF_TO_INT(d - c)) > (t = PTRDIFF_TO_INT(last - d - 1))) { s = t; } for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } first += (b - a), last -= (d - c); } @@ -1117,17 +1117,17 @@ tr_copy(int *ISA, const int *SA, int *c, *d, *e; int s, v; - v = b - SA - 1; + v = PTRDIFF_TO_INT(b - SA - 1); for(c = first, d = a - 1; c <= d; ++c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *++d = s; - ISA[s] = d - SA; + ISA[s] = PTRDIFF_TO_INT(d - SA); } } for(c = last - 1, e = d + 1, d = b; e < d; --c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *--d = s; - ISA[s] = d - SA; + ISA[s] = PTRDIFF_TO_INT(d - SA); } } } @@ -1141,13 +1141,13 @@ tr_partialcopy(int *ISA, const int *SA, int s, v; int rank, lastrank, newrank = -1; - v = b - SA - 1; + v = PTRDIFF_TO_INT(b - SA - 1); lastrank = -1; for(c = first, d = a - 1; c <= d; ++c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *++d = s; rank = ISA[s + depth]; - if(lastrank != rank) { lastrank = rank; newrank = d - SA; } + if(lastrank != rank) { lastrank = rank; newrank = PTRDIFF_TO_INT(d - SA); } ISA[s] = newrank; } } @@ -1155,7 +1155,7 @@ tr_partialcopy(int *ISA, const int *SA, lastrank = -1; for(e = d; first <= e; --e) { rank = ISA[*e]; - if(lastrank != rank) { lastrank = rank; newrank = e - SA; } + if(lastrank != rank) { lastrank = rank; newrank = PTRDIFF_TO_INT(e - SA); } if(newrank != rank) { ISA[*e] = newrank; } } @@ -1164,7 +1164,7 @@ tr_partialcopy(int *ISA, const int *SA, if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *--d = s; rank = ISA[s + depth]; - if(lastrank != rank) { lastrank = rank; newrank = d - SA; } + if(lastrank != rank) { lastrank = rank; newrank = PTRDIFF_TO_INT(d - SA); } ISA[s] = newrank; } } @@ -1180,23 +1180,23 @@ tr_introsort(int *ISA, const int *ISAd, int *a, *b, *c; int t; int v, x = 0; - int incr = ISAd - ISA; + int incr = PTRDIFF_TO_INT(ISAd - ISA); int limit, next; int ssize, trlink = -1; - for(ssize = 0, limit = tr_ilg(last - first);;) { + for(ssize = 0, limit = tr_ilg(PTRDIFF_TO_INT(last - first));;) { if(limit < 0) { if(limit == -1) { /* tandem repeat partition */ - tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1); + tr_partition(ISAd - incr, first, first, last, &a, &b, PTRDIFF_TO_INT(last - SA - 1)); /* update ranks */ if(a < last) { - for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } + for(c = first, v = PTRDIFF_TO_INT(a - SA - 1); c < a; ++c) { ISA[*c] = v; } } if(b < last) { - for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } + for(c = a, v = PTRDIFF_TO_INT(b - SA - 1); c < b; ++c) { ISA[*c] = v; } } /* push */ @@ -1207,19 +1207,19 @@ tr_introsort(int *ISA, const int *ISAd, } 
if((a - first) <= (last - b)) { if(1 < (a - first)) { - STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink); - last = a, limit = tr_ilg(a - first); + STACK_PUSH5(ISAd, b, last, tr_ilg(PTRDIFF_TO_INT(last - b)), trlink); + last = a, limit = tr_ilg(PTRDIFF_TO_INT(a - first)); } else if(1 < (last - b)) { - first = b, limit = tr_ilg(last - b); + first = b, limit = tr_ilg(PTRDIFF_TO_INT(last - b)); } else { STACK_POP5(ISAd, first, last, limit, trlink); } } else { if(1 < (last - b)) { - STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink); - first = b, limit = tr_ilg(last - b); + STACK_PUSH5(ISAd, first, a, tr_ilg(PTRDIFF_TO_INT(a - first)), trlink); + first = b, limit = tr_ilg(PTRDIFF_TO_INT(last - b)); } else if(1 < (a - first)) { - last = a, limit = tr_ilg(a - first); + last = a, limit = tr_ilg(PTRDIFF_TO_INT(a - first)); } else { STACK_POP5(ISAd, first, last, limit, trlink); } @@ -1228,26 +1228,26 @@ tr_introsort(int *ISA, const int *ISAd, /* tandem repeat copy */ a = stack[--ssize].b, b = stack[ssize].c; if(stack[ssize].d == 0) { - tr_copy(ISA, SA, first, a, b, last, ISAd - ISA); + tr_copy(ISA, SA, first, a, b, last, PTRDIFF_TO_INT(ISAd - ISA)); } else { if(0 <= trlink) { stack[trlink].d = -1; } - tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA); + tr_partialcopy(ISA, SA, first, a, b, last, PTRDIFF_TO_INT(ISAd - ISA)); } STACK_POP5(ISAd, first, last, limit, trlink); } else { /* sorted partition */ if(0 <= *first) { a = first; - do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a)); + do { ISA[*a] = PTRDIFF_TO_INT(a - SA); } while((++a < last) && (0 <= *a)); first = a; } if(first < last) { a = first; do { *a = ~*a; } while(*++a < 0); - next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1; - if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } } + next = (ISA[*a] != ISAd[*a]) ? tr_ilg(PTRDIFF_TO_INT(a - first + 1)) : -1; + if(++a < last) { for(b = first, v = PTRDIFF_TO_INT(a - SA - 1); b < a; ++b) { ISA[*b] = v; } } /* push */ - if(trbudget_check(budget, a - first)) { + if(trbudget_check(budget, PTRDIFF_TO_INT(a - first))) { if((a - first) <= (last - a)) { STACK_PUSH5(ISAd, a, last, -3, trlink); ISAd += incr, last = a, limit = next; @@ -1281,7 +1281,7 @@ tr_introsort(int *ISA, const int *ISAd, } if(limit-- == 0) { - tr_heapsort(ISAd, first, last - first); + tr_heapsort(ISAd, first, PTRDIFF_TO_INT(last - first)); for(a = last - 1; first < a; a = b) { for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; } } @@ -1297,14 +1297,14 @@ tr_introsort(int *ISA, const int *ISAd, /* partition */ tr_partition(ISAd, first, first + 1, last, &a, &b, v); if((last - first) != (b - a)) { - next = (ISA[*a] != v) ? tr_ilg(b - a) : -1; + next = (ISA[*a] != v) ? 
tr_ilg(PTRDIFF_TO_INT(b - a)) : -1; /* update ranks */ - for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } - if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } + for(c = first, v = PTRDIFF_TO_INT(a - SA - 1); c < a; ++c) { ISA[*c] = v; } + if(b < last) { for(c = a, v = PTRDIFF_TO_INT(b - SA - 1); c < b; ++c) { ISA[*c] = v; } } /* push */ - if((1 < (b - a)) && (trbudget_check(budget, b - a))) { + if((1 < (b - a)) && (trbudget_check(budget, PTRDIFF_TO_INT(b - a)))) { if((a - first) <= (last - b)) { if((last - b) <= (b - a)) { if(1 < (a - first)) { @@ -1381,8 +1381,8 @@ tr_introsort(int *ISA, const int *ISAd, } } } else { - if(trbudget_check(budget, last - first)) { - limit = tr_ilg(last - first), ISAd += incr; + if(trbudget_check(budget, PTRDIFF_TO_INT(last - first))) { + limit = tr_ilg(PTRDIFF_TO_INT(last - first)), ISAd += incr; } else { if(0 <= trlink) { stack[trlink].d = -1; } STACK_POP5(ISAd, first, last, limit, trlink); @@ -1420,7 +1420,7 @@ trsort(int *ISA, int *SA, int n, int depth) { budget.count = 0; tr_introsort(ISA, ISAd, SA, first, last, &budget); if(budget.count != 0) { unsorted += budget.count; } - else { skip = first - last; } + else { skip = PTRDIFF_TO_INT(first - last); } } else if((last - first) == 1) { skip = -1; } @@ -1634,7 +1634,7 @@ construct_SA(const unsigned char *T, int *SA, c0 = T[--s]; if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { - if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + if(0 <= c2) { BUCKET_B(c2, c1) = PTRDIFF_TO_INT(k - SA); } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); @@ -1658,7 +1658,7 @@ construct_SA(const unsigned char *T, int *SA, c0 = T[--s]; if((s == 0) || (T[s - 1] < c0)) { s = ~s; } if(c0 != c2) { - BUCKET_A(c2) = k - SA; + BUCKET_A(c2) = PTRDIFF_TO_INT(k - SA); k = SA + BUCKET_A(c2 = c0); } assert(i < k); @@ -1698,7 +1698,7 @@ construct_BWT(const unsigned char *T, int *SA, *j = ~((int)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { - if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + if(0 <= c2) { BUCKET_B(c2, c1) = PTRDIFF_TO_INT(k - SA); } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); @@ -1726,7 +1726,7 @@ construct_BWT(const unsigned char *T, int *SA, *i = c0; if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); } if(c0 != c2) { - BUCKET_A(c2) = k - SA; + BUCKET_A(c2) = PTRDIFF_TO_INT(k - SA); k = SA + BUCKET_A(c2 = c0); } assert(i < k); @@ -1738,7 +1738,7 @@ construct_BWT(const unsigned char *T, int *SA, } } - return orig - SA; + return PTRDIFF_TO_INT(orig - SA); } /* Constructs the burrows-wheeler transformed string directly @@ -1776,13 +1776,13 @@ construct_BWT_indexes(const unsigned char *T, int *SA, assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); - if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA; + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = PTRDIFF_TO_INT(j - SA); c0 = T[--s]; *j = ~((int)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { - if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + if(0 <= c2) { BUCKET_B(c2, c1) = PTRDIFF_TO_INT(k - SA); } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); @@ -1802,7 +1802,7 @@ construct_BWT_indexes(const unsigned char *T, int *SA, the sorted order of type B suffixes. 
*/ k = SA + BUCKET_A(c2 = T[n - 1]); if (T[n - 2] < c2) { - if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA; + if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = PTRDIFF_TO_INT(k - SA); *k++ = ~((int)T[n - 2]); } else { @@ -1814,17 +1814,17 @@ construct_BWT_indexes(const unsigned char *T, int *SA, if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); - if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA; + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = PTRDIFF_TO_INT(i - SA); c0 = T[--s]; *i = c0; if(c0 != c2) { - BUCKET_A(c2) = k - SA; + BUCKET_A(c2) = PTRDIFF_TO_INT(k - SA); k = SA + BUCKET_A(c2 = c0); } assert(i < k); if((0 < s) && (T[s - 1] < c0)) { - if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA; + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = PTRDIFF_TO_INT(k - SA); *k++ = ~((int)T[s - 1]); } else *k++ = s; @@ -1835,7 +1835,7 @@ construct_BWT_indexes(const unsigned char *T, int *SA, } } - return orig - SA; + return PTRDIFF_TO_INT(orig - SA); } diff --git a/lib/dictBuilder/divsufsort.h b/lib/dictBuilder/divsufsort.h index 5440994af15..3ed2b287ab1 100644 --- a/lib/dictBuilder/divsufsort.h +++ b/lib/dictBuilder/divsufsort.h @@ -27,11 +27,6 @@ #ifndef _DIVSUFSORT_H #define _DIVSUFSORT_H 1 -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - - /*- Prototypes -*/ /** @@ -59,9 +54,4 @@ divsufsort(const unsigned char *T, int *SA, int n, int openMP); int divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - #endif /* _DIVSUFSORT_H */ diff --git a/lib/dictBuilder/fastcover.c b/lib/dictBuilder/fastcover.c index 63a2cee7228..56a08138520 100644 --- a/lib/dictBuilder/fastcover.c +++ b/lib/dictBuilder/fastcover.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -49,38 +49,30 @@ /*-************************************* * Console display +* +* Captures the `displayLevel` variable in the local scope. ***************************************/ -#ifndef LOCALDISPLAYLEVEL -static int g_displayLevel = 0; -#endif #undef DISPLAY #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } -#undef LOCALDISPLAYLEVEL -#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ +#undef DISPLAYLEVEL +#define DISPLAYLEVEL(l, ...) \ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ -#undef DISPLAYLEVEL -#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) -#ifndef LOCALDISPLAYUPDATE -static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100; -static clock_t g_time = 0; -#endif -#undef LOCALDISPLAYUPDATE -#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ +#undef DISPLAYUPDATE +#define DISPLAYUPDATE(lastUpdateTime, l, ...) \ if (displayLevel >= l) { \ - if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \ - g_time = clock(); \ + const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; \ + if ((clock() - lastUpdateTime > refreshRate) || (displayLevel >= 4)) { \ + lastUpdateTime = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } -#undef DISPLAYUPDATE -#define DISPLAYUPDATE(l, ...) 
LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) /*-************************************* @@ -136,6 +128,7 @@ typedef struct { unsigned d; unsigned f; FASTCOVER_accel_t accelParams; + int displayLevel; } FASTCOVER_ctx_t; @@ -314,7 +307,8 @@ FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, unsigned d, double splitPoint, unsigned f, - FASTCOVER_accel_t accelParams) + FASTCOVER_accel_t accelParams, + int displayLevel) { const BYTE* const samples = (const BYTE*)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); @@ -323,6 +317,7 @@ FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; + ctx->displayLevel = displayLevel; /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || @@ -409,7 +404,9 @@ FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx, const COVER_epoch_info_t epochs = COVER_computeEpochs( (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1); const size_t maxZeroScoreRun = 10; + const int displayLevel = ctx->displayLevel; size_t zeroScoreRun = 0; + clock_t lastUpdateTime = 0; size_t epoch; DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", (U32)epochs.num, (U32)epochs.size); @@ -447,6 +444,7 @@ FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx, tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( + lastUpdateTime, 2, "\r%u%% ", (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } @@ -484,6 +482,7 @@ static void FASTCOVER_tryParameters(void* opaque) BYTE *const dict = (BYTE*)malloc(dictBufferCapacity); COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); U32* freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32)); + const int displayLevel = ctx->displayLevel; if (!segmentFreqs || !dict || !freqs) { DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); goto _cleanup; @@ -545,7 +544,7 @@ FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, } -ZDICTLIB_API size_t +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, @@ -555,8 +554,7 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, FASTCOVER_ctx_t ctx; ZDICT_cover_params_t coverParams; FASTCOVER_accel_t accelParams; - /* Initialize global data */ - g_displayLevel = (int)parameters.zParams.notificationLevel; + const int displayLevel = (int)parameters.zParams.notificationLevel; /* Assign splitPoint and f if not provided */ parameters.splitPoint = 1.0; parameters.f = parameters.f == 0 ? 
DEFAULT_F : parameters.f; @@ -585,13 +583,13 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, { size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, coverParams.d, parameters.splitPoint, parameters.f, - accelParams); + accelParams, displayLevel); if (ZSTD_isError(initVal)) { DISPLAYLEVEL(1, "Failed to initialize context\n"); return initVal; } } - COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel); /* Build the dictionary */ DISPLAYLEVEL(2, "Building dictionary\n"); { @@ -614,7 +612,7 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, } -ZDICTLIB_API size_t +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, @@ -646,25 +644,26 @@ ZDICT_optimizeTrainFromBuffer_fastCover( COVER_best_t best; POOL_ctx *pool = NULL; int warned = 0; + clock_t lastUpdateTime = 0; /* Checks */ if (splitPoint <= 0 || splitPoint > 1) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n"); + DISPLAYLEVEL(1, "Incorrect splitPoint\n"); return ERROR(parameter_outOfBound); } if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n"); + DISPLAYLEVEL(1, "Incorrect accel\n"); return ERROR(parameter_outOfBound); } if (kMinK < kMaxD || kMaxK < kMinK) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n"); + DISPLAYLEVEL(1, "Incorrect k\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { - LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n"); + DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { - LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n", + DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } @@ -679,19 +678,18 @@ ZDICT_optimizeTrainFromBuffer_fastCover( memset(&coverParams, 0 , sizeof(coverParams)); FASTCOVER_convertToCoverParams(*parameters, &coverParams); accelParams = FASTCOVER_defaultAccelParameters[accel]; - /* Turn down global display level to clean up display at level 2 and below */ - g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ - LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", - kIterations); + DISPLAYLEVEL(2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ FASTCOVER_ctx_t ctx; - LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); + DISPLAYLEVEL(3, "d=%u\n", d); { - size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams); + /* Turn down global display level to clean up display at level 2 and below */ + const int childDisplayLevel = displayLevel == 0 ? 
0 : displayLevel - 1; + size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams, childDisplayLevel); if (ZSTD_isError(initVal)) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); + DISPLAYLEVEL(1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return initVal; @@ -706,9 +704,9 @@ ZDICT_optimizeTrainFromBuffer_fastCover( /* Prepare the arguments */ FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc( sizeof(FASTCOVER_tryParameters_data_t)); - LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); + DISPLAYLEVEL(3, "k=%u\n", k); if (!data) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); + DISPLAYLEVEL(1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); FASTCOVER_ctx_destroy(&ctx); POOL_free(pool); @@ -723,7 +721,7 @@ ZDICT_optimizeTrainFromBuffer_fastCover( data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; data->parameters.shrinkDict = shrinkDict; - data->parameters.zParams.notificationLevel = (unsigned)g_displayLevel; + data->parameters.zParams.notificationLevel = (unsigned)ctx.displayLevel; /* Check the parameters */ if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, data->ctx->f, accel)) { @@ -739,14 +737,15 @@ ZDICT_optimizeTrainFromBuffer_fastCover( FASTCOVER_tryParameters(data); } /* Print status */ - LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", - (unsigned)((iteration * 100) / kIterations)); + DISPLAYUPDATE(lastUpdateTime, + 2, "\r%u%% ", + (unsigned)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); FASTCOVER_ctx_destroy(&ctx); } - LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); + DISPLAYLEVEL(2, "\r%79s\r", ""); /* Fill the output buffer and parameters with output of the best parameters */ { const size_t dictSize = best.dictSize; diff --git a/lib/dictBuilder/zdict.c b/lib/dictBuilder/zdict.c index a932276ffbc..befa616b87f 100644 --- a/lib/dictBuilder/zdict.c +++ b/lib/dictBuilder/zdict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -44,7 +44,6 @@ #ifndef ZDICT_STATIC_LINKING_ONLY # define ZDICT_STATIC_LINKING_ONLY #endif -#define HUF_STATIC_LINKING_ONLY #include "../common/mem.h" /* read */ #include "../common/fse.h" /* FSE_normalizeCount, FSE_writeNCount */ @@ -75,9 +74,9 @@ static const U32 g_selectivity_default = 9; * Console display ***************************************/ #undef DISPLAY -#define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } +#define DISPLAY(...) do { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } while (0) #undef DISPLAYLEVEL -#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ +#define DISPLAYLEVEL(l, ...) 
do { if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } } while (0) /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; } @@ -169,7 +168,7 @@ static void ZDICT_initDictItem(dictItem* d) #define MINMATCHLENGTH 7 /* heuristic determined experimentally */ static dictItem ZDICT_analyzePos( BYTE* doneMarks, - const int* suffix, U32 start, + const unsigned* suffix, U32 start, const void* buffer, U32 minRatio, U32 notificationLevel) { U32 lengthList[LLIMIT] = {0}; @@ -294,8 +293,10 @@ static dictItem ZDICT_analyzePos( for (i=(int)(maxLength-2); i>=0; i--) cumulLength[i] = cumulLength[i+1] + lengthList[i]; - for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break; - maxLength = i; + { unsigned u; + for (u=LLIMIT-1; u>=MINMATCHLENGTH; u--) if (cumulLength[u]>=minRatio) break; + maxLength = u; + } /* reduce maxLength in case of final into repetitive data */ { U32 l = (U32)maxLength; @@ -307,8 +308,10 @@ static dictItem ZDICT_analyzePos( /* calculate savings */ savings[5] = 0; - for (i=MINMATCHLENGTH; i<=(int)maxLength; i++) - savings[i] = savings[i-1] + (lengthList[i] * (i-3)); + { unsigned u; + for (u=MINMATCHLENGTH; u<=maxLength; u++) + savings[u] = savings[u-1] + (lengthList[u] * (u-3)); + } DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n", (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / (double)maxLength); @@ -373,7 +376,7 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const elt = table[u]; /* sort : improve rank */ while ((u>1) && (table[u-1].savings < elt.savings)) - table[u] = table[u-1], u--; + table[u] = table[u-1], u--; table[u] = elt; return u; } } @@ -384,11 +387,11 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */ /* append */ - int const addedLength = (int)eltEnd - (int)(table[u].pos + table[u].length); + int const addedLength = (int)eltEnd - (int)(table[u].pos + table[u].length); /* note: can be negative */ table[u].savings += elt.length / 8; /* rough approx bonus */ if (addedLength > 0) { /* otherwise, elt fully included into existing */ - table[u].length += addedLength; - table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ + table[u].length += (unsigned)addedLength; + table[u].savings += elt.savings * (unsigned)addedLength / elt.length; /* rough approx */ } /* sort : improve rank */ elt = table[u]; @@ -400,7 +403,7 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) { if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) { - size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 ); + size_t const addedLength = MAX( elt.length - table[u].length , 1 ); table[u].pos = elt.pos; table[u].savings += (U32)(elt.savings * addedLength / elt.length); table[u].length = MIN(elt.length, table[u].length + 1); @@ -468,8 +471,8 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, const size_t* fileSizes, unsigned nbFiles, unsigned minRatio, U32 notificationLevel) { - int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0)); - int* const suffix = suffix0+1; + unsigned* const suffix0 = 
(unsigned*)malloc((bufferSize+2)*sizeof(*suffix0)); + unsigned* const suffix = suffix0+1; U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix)); BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks)); /* +16 for overflow security */ U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos)); @@ -478,10 +481,16 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10; # undef DISPLAYUPDATE -# define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \ - if (ZDICT_clockSpan(displayClock) > refreshRate) \ - { displayClock = clock(); DISPLAY(__VA_ARGS__); \ - if (notificationLevel>=4) fflush(stderr); } } +# define DISPLAYUPDATE(l, ...) \ + do { \ + if (notificationLevel>=l) { \ + if (ZDICT_clockSpan(displayClock) > refreshRate) { \ + displayClock = clock(); \ + DISPLAY(__VA_ARGS__); \ + } \ + if (notificationLevel>=4) fflush(stderr); \ + } \ + } while (0) /* init */ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ @@ -498,11 +507,11 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, /* sort */ DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20)); - { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0); + { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, (int*)suffix, (int)bufferSize, 0); if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; } } - suffix[bufferSize] = (int)bufferSize; /* leads into noise */ - suffix0[0] = (int)bufferSize; /* leads into noise */ + suffix[bufferSize] = (unsigned)bufferSize; /* leads into noise */ + suffix0[0] = (unsigned)bufferSize; /* leads into noise */ /* build reverse suffix sort */ { size_t pos; for (pos=0; pos < bufferSize; pos++) @@ -524,7 +533,7 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, if (solution.length==0) { cursor++; continue; } ZDICT_insertDictItem(dictList, dictListSize, solution, buffer); cursor += solution.length; - DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100); + DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / (double)bufferSize * 100.0); } } _cleanup: @@ -567,15 +576,15 @@ static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params, size_t cSize; if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */ - { size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict); + { size_t const errorCode = ZSTD_compressBegin_usingCDict_deprecated(esr.zc, esr.dict); if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; } } - cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); + cSize = ZSTD_compressBlock_deprecated(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; } if (cSize) { /* if == 0; block is not compressible */ - const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc); + const SeqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc); /* literals stats */ { const BYTE* bytePtr; @@ -603,7 +612,7 @@ static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params, } if (nbSeq >= 2) { /* rep offsets */ - const seqDef* const seq = seqStorePtr->sequencesStart; + const SeqDef* const seq = seqStorePtr->sequencesStart; U32 offset1 = 
seq[0].offBase - ZSTD_REP_NUM; U32 offset2 = seq[1].offBase - ZSTD_REP_NUM; if (offset1 >= MAXREPOFFSET) offset1 = 0; @@ -676,6 +685,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles); BYTE* dstPtr = (BYTE*)dstBuffer; + U32 wksp[HUF_CTABLE_WORKSPACE_SIZE_U32]; /* init */ DEBUGLOG(4, "ZDICT_analyzeEntropy"); @@ -716,7 +726,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, } } /* analyze, build stats, starting with literals */ - { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog); + { size_t maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp)); if (HUF_isError(maxNbBits)) { eSize = maxNbBits; DISPLAYLEVEL(1, " HUF_buildCTable error \n"); @@ -725,7 +735,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */ DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n"); ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */ - maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog); + maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp)); assert(maxNbBits==9); } huffLog = (U32)maxNbBits; @@ -766,7 +776,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, llLog = (U32)errorCode; /* write result to buffer */ - { size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog); + { size_t const hhSize = HUF_writeCTable_wksp(dstPtr, maxDstSize, hufTable, 255, huffLog, wksp, sizeof(wksp)); if (HUF_isError(hhSize)) { eSize = hhSize; DISPLAYLEVEL(1, "HUF_writeCTable error \n"); diff --git a/lib/dll/example/Makefile b/lib/dll/example/Makefile index 03b034dd50a..86cf6906e5c 100644 --- a/lib/dll/example/Makefile +++ b/lib/dll/example/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
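/* Illustrative sketch, not part of the patch: the fastcover.c hunks above replace
 * the mutable global g_displayLevel with a displayLevel field stored in the
 * training context, so concurrent parameter searches no longer share logging
 * state through a global. All names below (trainer_ctx, CTX_DISPLAYLEVEL,
 * trainer_run) are hypothetical. */
#include <stdio.h>

typedef struct {
    int displayLevel;   /* 0: none, 1: errors, 2: progress, 3+: debug */
} trainer_ctx;

/* same shape as the patched DISPLAYLEVEL: the level comes from the context,
 * and do { } while (0) makes the macro behave like a single statement */
#define CTX_DISPLAYLEVEL(ctx, l, ...)            \
    do {                                         \
        if ((ctx)->displayLevel >= (l)) {        \
            fprintf(stderr, __VA_ARGS__);        \
            fflush(stderr);                      \
        }                                        \
    } while (0)

static void trainer_run(const trainer_ctx* ctx)
{
    CTX_DISPLAYLEVEL(ctx, 2, "training...\n");
}

int main(void)
{
    trainer_ctx ctx = { 2 };
    trainer_run(&ctx);
    return 0;
}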
# # This source code is licensed under both the BSD-style license (found in the diff --git a/lib/dll/example/build_package.bat b/lib/dll/example/build_package.bat index 8baabc7b2c6..bd3103de5bd 100644 --- a/lib/dll/example/build_package.bat +++ b/lib/dll/example/build_package.bat @@ -1,20 +1,55 @@ -@ECHO OFF -MKDIR bin\dll bin\static bin\example bin\include -COPY tests\fullbench.c bin\example\ -COPY programs\datagen.c bin\example\ -COPY programs\datagen.h bin\example\ -COPY programs\util.h bin\example\ -COPY programs\platform.h bin\example\ -COPY lib\common\mem.h bin\example\ -COPY lib\common\zstd_internal.h bin\example\ -COPY lib\common\error_private.h bin\example\ -COPY lib\common\xxhash.h bin\example\ -COPY lib\libzstd.a bin\static\libzstd_static.lib -COPY lib\dll\libzstd.* bin\dll\ -COPY lib\dll\example\Makefile bin\example\ -COPY lib\dll\example\fullbench-dll.* bin\example\ -COPY lib\dll\example\README.md bin\ -COPY lib\zstd.h bin\include\ -COPY lib\common\zstd_errors.h bin\include\ -COPY lib\dictBuilder\zdict.h bin\include\ -COPY programs\zstd.exe bin\zstd.exe +@echo off +setlocal + +rem Detect build type based on available files +set BUILD_TYPE=make +if exist "build\cmake\build\lib\Release\zstd_static.lib" set BUILD_TYPE=cmake + +echo Detected build type: %BUILD_TYPE% + +rem Create required directories +mkdir bin\dll bin\static bin\example bin\include + +rem Copy common files using a subroutine. Exits immediately on failure. +call :copyFile "tests\fullbench.c" "bin\example\" +call :copyFile "programs\datagen.c" "bin\example\" +call :copyFile "programs\datagen.h" "bin\example\" +call :copyFile "programs\util.h" "bin\example\" +call :copyFile "programs\platform.h" "bin\example\" +call :copyFile "lib\common\mem.h" "bin\example\" +call :copyFile "lib\common\zstd_internal.h" "bin\example\" +call :copyFile "lib\common\error_private.h" "bin\example\" +call :copyFile "lib\common\xxhash.h" "bin\example\" +call :copyFile "lib\dll\example\Makefile" "bin\example\" +call :copyFile "lib\dll\example\fullbench-dll.*" "bin\example\" +call :copyFile "lib\zstd.h" "bin\include\" +call :copyFile "lib\zstd_errors.h" "bin\include\" +call :copyFile "lib\zdict.h" "bin\include\" + +rem Copy build-specific files +if "%BUILD_TYPE%"=="cmake" ( + echo Copying CMake build artifacts... + call :copyFile "build\cmake\build\lib\Release\zstd_static.lib" "bin\static\libzstd_static.lib" + call :copyFile "build\cmake\build\lib\Release\zstd.dll" "bin\dll\libzstd.dll" + call :copyFile "build\cmake\build\lib\Release\zstd.lib" "bin\dll\zstd.lib" + call :copyFile "build\cmake\build\programs\Release\zstd.exe" "bin\zstd.exe" + call :copyFile "lib\dll\example\README.md" "bin\README.md" +) else ( + echo Copying Make build artifacts... + call :copyFile "lib\libzstd.a" "bin\static\libzstd_static.lib" + call :copyFile "lib\dll\libzstd.*" "bin\dll\" + call :copyFile "programs\zstd.exe" "bin\zstd.exe" + call :copyFile "lib\dll\example\README.md" "bin\" +) + +echo Build package created successfully for %BUILD_TYPE% build! +endlocal +exit /b 0 + +:copyFile +copy "%~1" "%~2" +if errorlevel 1 ( + echo Failed to copy "%~1" + exit 1 +) +exit /b \ No newline at end of file diff --git a/lib/install_oses.mk b/lib/install_oses.mk new file mode 100644 index 00000000000..c2cdd1635e5 --- /dev/null +++ b/lib/install_oses.mk @@ -0,0 +1,17 @@ +# ################################################################ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
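/* Illustrative note, not part of the patch: the zdict.c hunks earlier in this
 * diff switch from HUF_buildCTable() to HUF_buildCTable_wksp(), which takes an
 * explicit caller-provided workspace instead of managing its own scratch memory.
 * The call shape, reusing the variables from that hunk (hufTable, countLit,
 * huffLog) and a stack workspace sized by the library constant: */
U32 wksp[HUF_CTABLE_WORKSPACE_SIZE_U32];
size_t const maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog,
                                              wksp, sizeof(wksp));
if (HUF_isError(maxNbBits)) { /* propagate maxNbBits as the error code */ }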
+# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# You may select, at your option, one of the above-listed licenses. +# ################################################################ + +# This included Makefile provides the following variables : +# UNAME, INSTALL_OS_LIST + +UNAME := $(shell sh -c 'MSYSTEM="MSYS" uname') + +# List of OSes for which target install is supported +INSTALL_OS_LIST ?= Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX MSYS_NT% CYGWIN_NT% diff --git a/lib/legacy/zstd_legacy.h b/lib/legacy/zstd_legacy.h index a6f1174b82e..7a8a04e593c 100644 --- a/lib/legacy/zstd_legacy.h +++ b/lib/legacy/zstd_legacy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -124,6 +124,20 @@ MEM_STATIC size_t ZSTD_decompressLegacy( const void* dict,size_t dictSize) { U32 const version = ZSTD_isLegacy(src, compressedSize); + char x; + /* Avoid passing NULL to legacy decoding. */ + if (dst == NULL) { + assert(dstCapacity == 0); + dst = &x; + } + if (src == NULL) { + assert(compressedSize == 0); + src = &x; + } + if (dict == NULL) { + assert(dictSize == 0); + dict = &x; + } (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */ switch(version) { @@ -242,6 +256,13 @@ MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size frameSizeInfo.compressedSize = ERROR(srcSize_wrong); frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; } + /* In all cases, decompressedBound == nbBlocks * ZSTD_BLOCKSIZE_MAX. + * So we can compute nbBlocks without having to change every function. + */ + if (frameSizeInfo.decompressedBound != ZSTD_CONTENTSIZE_ERROR) { + assert((frameSizeInfo.decompressedBound & (ZSTD_BLOCKSIZE_MAX - 1)) == 0); + frameSizeInfo.nbBlocks = (size_t)(frameSizeInfo.decompressedBound / ZSTD_BLOCKSIZE_MAX); + } return frameSizeInfo; } @@ -280,6 +301,12 @@ MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version) MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion, const void* dict, size_t dictSize) { + char x; + /* Avoid passing NULL to legacy decoding. */ + if (dict == NULL) { + assert(dictSize == 0); + dict = &x; + } DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion); if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion); switch(newVersion) @@ -339,6 +366,16 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { + static char x; + /* Avoid passing NULL to legacy decoding. */ + if (output->dst == NULL) { + assert(output->size == 0); + output->dst = &x; + } + if (input->src == NULL) { + assert(input->size == 0); + input->src = &x; + } DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version); switch(version) { diff --git a/lib/legacy/zstd_v01.c b/lib/legacy/zstd_v01.c index 23caaef5647..d3322f93e11 100644 --- a/lib/legacy/zstd_v01.c +++ b/lib/legacy/zstd_v01.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. 
and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,6 +14,7 @@ ******************************************/ #include <stddef.h> /* size_t, ptrdiff_t */ #include "zstd_v01.h" +#include "../common/compiler.h" #include "../common/error_private.h" @@ -190,25 +191,6 @@ typedef signed long long S64; /**************************************************************** * Memory I/O *****************************************************************/ -/* FSE_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define FSE_FORCE_MEMORY_ACCESS 1 -# endif -#endif - static unsigned FSE_32bits(void) { @@ -221,24 +203,6 @@ static unsigned FSE_isLittleEndian(void) return one.c[0]; } -#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2) - -static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; } -static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; } -static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -#else - static U16 FSE_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -254,8 +218,6 @@ static U64 FSE_read64(const void* memPtr) U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } -#endif /* FSE_FORCE_MEMORY_ACCESS */ - static U16 FSE_readLE16(const void* memPtr) { if (FSE_isLittleEndian()) @@ -468,7 +430,7 @@ static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_ma static short FSE_abs(short a) { - return a<0? -a : a; + return a<0? (short)-a : a; } @@ -1190,7 +1152,7 @@ static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, si zstd - standard compression library Copyright (C) 2014-2015, Yann Collet.
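/* Illustrative sketch, not part of the patch: the zstd_legacy.h hunks earlier in
 * this diff substitute a local dummy byte for empty (NULL, 0) buffers before
 * calling into the legacy decoders, because passing NULL to routines such as
 * memcpy is undefined behavior even with a length of 0. Minimal shape of the
 * pattern, with a hypothetical decode() stand-in: */
#include <assert.h>
#include <stddef.h>

static void decode(const void* src, size_t srcSize)  /* stand-in for a legacy decoder */
{
    (void)src; (void)srcSize;
}

static void decode_nullsafe(const void* src, size_t srcSize)
{
    char x;
    if (src == NULL) {
        assert(srcSize == 0);  /* a NULL buffer must be empty */
        src = &x;              /* valid one-byte object, never actually read */
    }
    decode(src, srcSize);
}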
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1421,7 +1383,7 @@ typedef struct { BYTE* matchLength; BYTE* dumpsStart; BYTE* dumps; -} seqStore_t; +} SeqStore_t; typedef struct ZSTD_Cctx_s @@ -1429,7 +1391,7 @@ typedef struct ZSTD_Cctx_s const BYTE* base; U32 current; U32 nextUpdate; - seqStore_t seqStore; + SeqStore_t seqStore; #ifdef __AVX2__ __m256i hashTable[HASH_TABLESIZE>>3]; #else @@ -1759,20 +1721,26 @@ static size_t ZSTD_execSequence(BYTE* op, static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ const BYTE* const ostart = op; + BYTE* const oLitEnd = op + sequence.litLength; const size_t litLength = sequence.litLength; BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ const BYTE* const litEnd = *litPtr + litLength; - /* check */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); - if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ + if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ /* copy Literals */ - if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8)) - memmove(op, *litPtr, litLength); /* overwrite risk */ - else - ZSTD_wildcopy(op, *litPtr, litLength); + ZSTD_memmove(op, *litPtr, sequence.litLength); /* note : v0.1 seems to allow scenarios where output or input are close to end of buffer */ + op += litLength; *litPtr = litEnd; /* update for next sequence */ @@ -1855,11 +1823,12 @@ static size_t ZSTD_decompressSequences( BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; - size_t errorCode, dumpsLength; + size_t errorCode = 0; + size_t dumpsLength = 0; const BYTE* litPtr = litStart; const BYTE* const litEnd = litStart + litSize; - int nbSeq; - const BYTE* dumps; + int nbSeq = 0; + const BYTE* dumps = NULL; U32* DTableLL = dctx->LLTable; U32* DTableML = dctx->MLTable; U32* DTableOffb = dctx->OffTable; @@ -1947,7 +1916,7 @@ size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const voi size_t remainingSize = srcSize; U32 magicNumber; size_t errorCode=0; - blockProperties_t blockProperties; + blockProperties_t blockProperties = { 0 }; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); @@ -2151,6 +2120,7 @@ size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSi } ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; + if (ZSTDv01_isError(rSize)) return rSize; ctx->previousDstEnd = 
(void*)( ((char*)dst) + rSize); return rSize; } diff --git a/lib/legacy/zstd_v01.h index f777eb6e4c9..6ac876954d1 100644 --- a/lib/legacy/zstd_v01.h +++ b/lib/legacy/zstd_v01.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v02.c index 6eadd3948b7..d1e00385a23 100644 --- a/lib/legacy/zstd_v02.c +++ b/lib/legacy/zstd_v02.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,6 +11,7 @@ #include <stddef.h> /* size_t, ptrdiff_t */ #include "zstd_v02.h" +#include "../common/compiler.h" #include "../common/error_private.h" @@ -28,7 +29,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -71,20 +72,6 @@ extern "C" { #include <string.h> /* memcpy */ -/****************************************** -* Compiler-specific -******************************************/ -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif - - /**************************************************************** * Basic Types *****************************************************************/ @@ -115,24 +102,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -143,33 +112,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -190,9 +132,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -269,7 +208,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -510,7 +449,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) Error codes and messages Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -609,7 +548,7 @@ typedef unsigned FSE_DTable; /* don't allocate that. 
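/* Illustrative sketch, not part of the patch: the FSE_FORCE_MEMORY_ACCESS /
 * MEM_FORCE_MEMORY_ACCESS switches deleted above used to pick between memcpy,
 * __attribute__((packed)) reads, and raw pointer casts for unaligned loads.
 * Only the portable memcpy form survives, because modern compilers already
 * reduce it to a single load on targets that permit unaligned access: */
#include <stdint.h>
#include <string.h>

static uint32_t read32(const void* memPtr)
{
    uint32_t val;
    memcpy(&val, memPtr, sizeof(val));  /* folds to one 32-bit load on x86 / AArch64 */
    return val;
}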
It's just a way to be mor header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -753,7 +692,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -822,7 +761,7 @@ static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, si Header File Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -882,7 +821,7 @@ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ Header File for static linking only Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -923,7 +862,7 @@ extern "C" { * Streaming functions ***************************************/ -typedef struct ZSTD_DCtx_s ZSTD_DCtx; +typedef struct ZSTDv02_Dctx_s ZSTD_DCtx; /* Use above functions alternatively. @@ -946,7 +885,7 @@ typedef struct ZSTD_DCtx_s ZSTD_DCtx; FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1450,7 +1389,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2609,7 +2548,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. 
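/* Illustrative sketch, not part of the patch: the typedef hunk above gives each
 * legacy unit its own struct tag (ZSTDv02_Dctx_s, ZSTDv03_Dctx_s, ...) behind
 * the file-local ZSTD_DCtx typedef. When several legacy versions are linked
 * into one binary, reusing the single tag ZSTD_DCtx_s for structs with
 * different layouts risks type confusion across translation units; distinct
 * tags keep the types separate. Hypothetical two-file shape, with made-up
 * fields: */
/* in the v02 unit: */ struct ZSTDv02_Dctx_s { int v02_state; }; typedef struct ZSTDv02_Dctx_s ZSTD_DCtx;
/* in the v03 unit: */ struct ZSTDv03_Dctx_s { int v03_state; }; /* its own tag, no collision */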
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2783,7 +2722,7 @@ typedef struct { BYTE* matchLength; BYTE* dumpsStart; BYTE* dumps; -} seqStore_t; +} SeqStore_t; /* ************************************* @@ -2798,7 +2737,7 @@ static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /* ************************************************************* * Decompression section ***************************************************************/ -struct ZSTD_DCtx_s +struct ZSTDv02_Dctx_s { U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; @@ -3114,12 +3053,19 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use the pointer check */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3472,6 +3418,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi } ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; + if (ZSTD_isError(rSize)) return rSize; ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); return rSize; } diff --git a/lib/legacy/zstd_v02.h index 1b371953b74..dab0260ee9e 100644 --- a/lib/legacy/zstd_v02.h +++ b/lib/legacy/zstd_v02.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v03.c index 72fc9140d4a..50fa87cf362 100644 --- a/lib/legacy/zstd_v03.c +++ b/lib/legacy/zstd_v03.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,6 +11,7 @@ #include <stddef.h> /* size_t, ptrdiff_t */ #include "zstd_v03.h" +#include "../common/compiler.h" #include "../common/error_private.h" @@ -29,7 +30,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet.
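/* Illustrative sketch, not part of the patch: the new ZSTD_execSequence checks
 * above validate the sequence lengths against the remaining output and literal
 * buffers before any pointer arithmetic uses them, so expressions like
 * op + sequence.litLength can no longer wrap past a buffer end. The shape of
 * the length-first check, with hypothetical names: */
#include <stddef.h>

static int sequence_fits(size_t litLength, size_t matchLength,
                         const unsigned char* op, const unsigned char* oend,
                         const unsigned char* litPtr, const unsigned char* litLimit)
{
    size_t const seqLength = litLength + matchLength;
    if (seqLength > (size_t)(oend - op)) return 0;          /* dst too small */
    if (litLength > (size_t)(litLimit - litPtr)) return 0;  /* literal overread */
    return 1;  /* lengths are safe; pointer-based checks may follow */
}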
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -72,20 +73,6 @@ extern "C" { #include <string.h> /* memcpy */ -/****************************************** -* Compiler-specific -******************************************/ -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif - - /**************************************************************** * Basic Types *****************************************************************/ @@ -116,24 +103,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -144,33 +113,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment.
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -191,10 +133,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } - -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -271,7 +209,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -512,7 +450,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) Error codes and messages Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -611,7 +549,7 @@ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be mor header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -755,7 +693,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -823,7 +761,7 @@ static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, si Header File Copyright (C) 2014-2015, Yann Collet. 
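/* Illustrative note, not part of the patch: the FSE_abs hunks above add a
 * (short) cast because unary minus promotes the short operand to int, so the
 * negated value would narrow implicitly on return and trigger
 * -Wconversion-style warnings. The cast makes the narrowing explicit: */
static short short_abs(short a)
{
    return (short)(a < 0 ? -a : a);  /* explicit narrowing back to short */
}
/* (like the original, this still cannot represent the absolute value of SHRT_MIN) */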
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -883,7 +821,7 @@ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ Header File for static linking only Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -924,7 +862,7 @@ extern "C" { * Streaming functions ***************************************/ -typedef struct ZSTD_DCtx_s ZSTD_DCtx; +typedef struct ZSTDv03_Dctx_s ZSTD_DCtx; /* Use above functions alternatively. @@ -947,7 +885,7 @@ typedef struct ZSTD_DCtx_s ZSTD_DCtx; FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1173,7 +1111,7 @@ static unsigned FSE_isError(size_t code) { return ERR_isError(code); } ****************************************************************/ static short FSE_abs(short a) { - return a<0 ? -a : a; + return a<0 ? (short)-a : a; } static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, @@ -1451,7 +1389,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2248,7 +2186,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2424,7 +2362,7 @@ typedef struct { BYTE* matchLength; BYTE* dumpsStart; BYTE* dumps; -} seqStore_t; +} SeqStore_t; /* ************************************* @@ -2439,7 +2377,7 @@ static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /* ************************************************************* * Decompression section ***************************************************************/ -struct ZSTD_DCtx_s +struct ZSTDv03_Dctx_s { U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; @@ -2755,18 +2693,24 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ /* copy Match */ - { - const BYTE* match = op - sequence.offset; + { const BYTE* match = op - sequence.offset; /* check */ if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */ @@ -3114,6 +3058,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi } ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; + if (ZSTD_isError(rSize)) return rSize; ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); return rSize; } diff --git a/lib/legacy/zstd_v03.h b/lib/legacy/zstd_v03.h index 7a00d4304ba..9bf3cce6473 100644 --- a/lib/legacy/zstd_v03.h +++ b/lib/legacy/zstd_v03.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v04.c b/lib/legacy/zstd_v04.c index a2d281eb97d..31c2052583d 100644 --- a/lib/legacy/zstd_v04.c +++ b/lib/legacy/zstd_v04.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -16,6 +16,7 @@ #include <string.h> /* memcpy */ #include "zstd_v04.h" +#include "../common/compiler.h" #include "../common/error_private.h" @@ -37,15 +38,6 @@ extern "C" { # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif /**************************************************************** @@ -87,24 +79,6 @@ extern "C" { /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -115,33 +89,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment.
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -162,9 +109,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -542,7 +486,7 @@ If there is an error, the function will return an error code, which can be teste header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -781,7 +725,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -930,7 +874,7 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1150,7 +1094,7 @@ static unsigned FSE_isError(size_t code) { return ERR_isError(code); } ****************************************************************/ static short FSE_abs(short a) { - return a<0 ? -a : a; + return a<0 ? (short)-a : a; } static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, @@ -1436,7 +1380,7 @@ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, siz header file Copyright (C) 2013-2015, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1514,7 +1458,7 @@ static unsigned HUF_isError(size_t code); /* tells if a return value i header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1601,7 +1545,7 @@ static size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2401,7 +2345,7 @@ static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_ zstd - decompression module fo v0.4 legacy format Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2876,13 +2820,19 @@ static size_t ZSTD_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3260,6 +3210,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi } ctx->stage = ZSTDds_decodeBlockHeader; ctx->expected = ZSTD_blockHeaderSize; + if (ZSTD_isError(rSize)) return rSize; ctx->previousDstEnd = (char*)dst + rSize; return rSize; } @@ -3283,7 +3234,7 @@ static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, s Buffered version of Zstd compression library Copyright (C) 2015, Yann Collet. 
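/* Illustrative sketch, not part of the patch: the one-line fix repeated across
 * the legacy ZSTD_decompressContinue variants above checks rSize before using
 * it as a byte count. zstd encodes errors as very large size_t values, so
 * computing (char*)dst + rSize with an error code is wild pointer arithmetic.
 * Hypothetical shape, assuming a decode_block() helper: */
size_t const rSize = decode_block(ctx, dst, maxDstSize, src, srcSize);
ctx->expected = ZSTD_blockHeaderSize;
if (ZSTD_isError(rSize)) return rSize;        /* propagate before any pointer math */
ctx->previousDstEnd = (char*)dst + rSize;     /* rSize is now known to be a real size */
return rSize;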
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3587,8 +3538,8 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); } const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } -size_t ZBUFFv04_recommendedDInSize() { return BLOCKSIZE + 3; } -size_t ZBUFFv04_recommendedDOutSize() { return BLOCKSIZE; } +size_t ZBUFFv04_recommendedDInSize(void) { return BLOCKSIZE + 3; } +size_t ZBUFFv04_recommendedDOutSize(void) { return BLOCKSIZE; } diff --git a/lib/legacy/zstd_v04.h b/lib/legacy/zstd_v04.h index 66b97ab8e60..640240d624d 100644 --- a/lib/legacy/zstd_v04.h +++ b/lib/legacy/zstd_v04.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v05.c b/lib/legacy/zstd_v05.c index 216c67d0194..ee180487e94 100644 --- a/lib/legacy/zstd_v05.c +++ b/lib/legacy/zstd_v05.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -106,24 +106,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
- * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -134,37 +116,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -195,9 +146,6 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -262,7 +210,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) Header File for static linking only Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -286,7 +234,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ #ifndef ZSTD_STATIC_H #define ZSTD_STATIC_H @@ -398,7 +346,7 @@ size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity Header File for include Copyright (C) 2014-2016, Yann Collet. 
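Across all the legacy versions, these hunks delete the MEM_FORCE_MEMORY_ACCESS selector and its two riskier access methods, keeping only the memcpy()-based one. The rationale: modern compilers recognize a fixed-size memcpy into a local and emit a single (possibly unaligned) load, so the packed-union and direct-cast variants no longer buy performance, while the direct cast violates alignment and effective-type rules. The surviving idiom, sketched standalone:

    #include <stdint.h>
    #include <string.h>

    /* Standard-conforming unaligned read: the compiler lowers the fixed-size
     * memcpy to one load on targets where that is safe, so no union or
     * pointer-cast trickery is needed. */
    static uint32_t read32_unaligned(const void* p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof v);
        return v;
    }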
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -543,7 +491,7 @@ typedef struct { U32 litLengthSum; U32 litSum; U32 offCodeSum; -} seqStore_t; +} SeqStore_t; @@ -553,7 +501,7 @@ typedef struct { header file Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -672,7 +620,7 @@ size_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -901,7 +849,7 @@ MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1051,7 +999,7 @@ MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr) FSEv05 : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1276,7 +1224,7 @@ const char* FSEv05_getErrorName(size_t code) { return ERR_getErrorName(code); } /*-************************************************************** * FSEv05 NCount encoding-decoding ****************************************************************/ -static short FSEv05_abs(short a) { return a<0 ? -a : a; } +static short FSEv05_abs(short a) { return a<0 ? (short)-a : a; } size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, @@ -1537,7 +1485,7 @@ size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t header file Copyright (C) 2013-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1610,7 +1558,7 @@ const char* HUFv05_getErrorName(size_t code); /* provides error code string (u header file, for static linking only Copyright (C) 2013-2016, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1702,7 +1650,7 @@ size_t HUFv05_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2547,7 +2495,7 @@ size_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3234,13 +3182,19 @@ static size_t ZSTDv05_execSequence(BYTE* op, const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTDv05_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTDv05_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3646,6 +3600,7 @@ size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSi } dctx->stage = ZSTDv05ds_decodeBlockHeader; dctx->expected = ZSTDv05_blockHeaderSize; + if (ZSTDv05_isError(rSize)) return rSize; dctx->previousDstEnd = (char*)dst + rSize; return rSize; } @@ -3746,7 +3701,7 @@ size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, s Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. 
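The one-line "if (ZSTDv05_isError(rSize)) return rSize;" insertion above (and its v04/v06/v07 counterparts) fixes an ordering bug: zstd-style APIs encode errors in-band as very large size_t values, so computing (char*)dst + rSize before checking would advance previousDstEnd by a bogus offset and poison later window tracking. A runnable toy model of the convention (the error base and all names here are invented for illustration):

    #include <stdio.h>
    #include <stddef.h>

    #define TOY_ERROR_BASE ((size_t)-64)                 /* top 64 values = error codes */
    static int toy_isError(size_t code) { return code >= TOY_ERROR_BASE; }

    int main(void)
    {
        char dst[16];
        const char* previousDstEnd = dst;
        size_t rSize = TOY_ERROR_BASE + 3;               /* pretend decoding failed */
        if (toy_isError(rSize)) {
            puts("error: leave previousDstEnd untouched");
        } else {
            previousDstEnd = dst + rSize;                /* only advance on success */
        }
        (void)previousDstEnd;
        return 0;
    }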
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -4017,8 +3972,15 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst zbc->outStart += flushedSize; if (flushedSize == toFlushSize) { zbc->stage = ZBUFFv05ds_read; - if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize) - zbc->outStart = zbc->outEnd = 0; + if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize) { + /* Not enough room for next block - need to wrap buffer. + * Preserve history: copy the last windowSize bytes to the + * beginning so that back-references can still find valid data. */ + size_t const windowSize = (size_t)1 << zbc->params.windowLog; + size_t const preserveSize = MIN(zbc->outEnd, windowSize); + memmove(zbc->outBuff, zbc->outBuff + zbc->outEnd - preserveSize, preserveSize); + zbc->outStart = zbc->outEnd = preserveSize; + } break; } /* cannot flush everything */ diff --git a/lib/legacy/zstd_v05.h b/lib/legacy/zstd_v05.h index bd423bfc1b9..2dcffc92367 100644 --- a/lib/legacy/zstd_v05.h +++ b/lib/legacy/zstd_v05.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v06.c b/lib/legacy/zstd_v06.c index b224355d437..fb8c14df914 100644 --- a/lib/legacy/zstd_v06.c +++ b/lib/legacy/zstd_v06.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,6 +14,7 @@ #include <stddef.h> /* size_t, ptrdiff_t */ #include <string.h> /* memcpy */ #include <stdlib.h> /* malloc, free, qsort */ +#include "../common/compiler.h" #include "../common/error_private.h" @@ -23,7 +24,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -67,15 +68,6 @@ extern "C" { # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif /*-************************************************************** @@ -108,24 +100,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance.
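The memmove fix just above is worth a sketch. When the streaming decoder wraps its output buffer back to offset 0, any still-pending match offset (up to 1 << windowLog bytes back) would point into freshly discarded history; preserving the final windowSize bytes keeps every legal back-reference resolvable. A standalone version of the wrap step (the struct is a stand-in for the ZBUFF context, not its real layout):

    #include <string.h>
    #include <stddef.h>

    #define MIN(a,b) ((a) < (b) ? (a) : (b))

    typedef struct { char* outBuff; size_t outStart; size_t outEnd; } toy_dctx;

    /* Keep the last windowSize bytes at the front of the buffer so matches
     * reaching up to windowSize back still find their source data. */
    static void wrap_preserving_history(toy_dctx* z, size_t windowSize)
    {
        size_t const preserveSize = MIN(z->outEnd, windowSize);
        memmove(z->outBuff, z->outBuff + z->outEnd - preserveSize, preserveSize);
        z->outStart = z->outEnd = preserveSize;
    }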
- * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } @@ -136,33 +110,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -183,9 +130,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } - -#endif /* MEM_FORCE_MEMORY_ACCESS */ - MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -281,7 +225,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) Header File for static linking only Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -305,7 +249,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ #ifndef ZSTDv06_STATIC_H #define ZSTDv06_STATIC_H @@ -412,7 +356,7 @@ ZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, siz Header File for include Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -608,9 +552,9 @@ typedef struct { U32 cachedLitLength; const BYTE* cachedLiterals; ZSTDv06_stats_t stats; -} seqStore_t; +} SeqStore_t; -void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq); +void ZSTDv06_seqToCodes(const SeqStore_t* seqStorePtr, size_t const nbSeq); #endif /* ZSTDv06_CCOMMON_H_MODULE */ @@ -619,7 +563,7 @@ void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq); Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -767,7 +711,7 @@ If there is an error, the function will return an error code, which can be teste header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1002,7 +946,7 @@ MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream) header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1210,7 +1154,7 @@ MEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStre Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1258,7 +1202,7 @@ static unsigned HUFv06_isError(size_t code) { return ERR_isError(code); } /*-************************************************************** * FSE NCount encoding-decoding ****************************************************************/ -static short FSEv06_abs(short a) { return a<0 ? -a : a; } +static short FSEv06_abs(short a) { return a<0 ? (short)-a : a; } size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) @@ -1355,7 +1299,7 @@ size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1679,7 +1623,7 @@ size_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1749,7 +1693,7 @@ size_t HUFv06_compressBound(size_t size); /**< maximum compressed size */ header file, for static linking only Copyright (C) 2013-2016, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1931,7 +1875,7 @@ MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankSta Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2676,7 +2620,7 @@ size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2700,7 +2644,7 @@ size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cS OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2730,7 +2674,7 @@ const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(er zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2754,7 +2698,7 @@ const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(er OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ /* *************************************************************** @@ -3370,13 +3314,19 @@ static size_t ZSTDv06_execSequence(BYTE* op, const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTDv06_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTDv06_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ @@ -3787,6 +3737,7 @@ size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapac } dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTDv06_blockHeaderSize; + if (ZSTDv06_isError(rSize)) return rSize; dctx->previousDstEnd = (char*)dst + rSize; return rSize; } @@ -3889,7 +3840,7 @@ size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, s Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3913,7 +3864,7 @@ size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, s OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -3968,6 +3919,10 @@ ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void) if (zbd==NULL) return NULL; memset(zbd, 0, sizeof(*zbd)); zbd->zd = ZSTDv06_createDCtx(); + if (zbd->zd==NULL) { + ZBUFFv06_freeDCtx(zbd); /* avoid leaking the context */ + return NULL; + } zbd->stage = ZBUFFds_init; return zbd; } @@ -4035,7 +3990,8 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd, size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (ZSTDv06_isError(hSize)) return hSize; if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ - memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); + if (ip != NULL) + memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize; /* remaining header bytes + next block header */ diff --git a/lib/legacy/zstd_v06.h b/lib/legacy/zstd_v06.h index 9e32b76e08d..633891010d7 100644 --- a/lib/legacy/zstd_v06.h +++ b/lib/legacy/zstd_v06.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/legacy/zstd_v07.c b/lib/legacy/zstd_v07.c index c12a091cb55..cdc56288cb2 100644 --- a/lib/legacy/zstd_v07.c +++ b/lib/legacy/zstd_v07.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -24,6 +24,7 @@ #define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */ #define ZSTDv07_STATIC_LINKING_ONLY +#include "../common/compiler.h" #include "../common/error_private.h" @@ -184,7 +185,7 @@ ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockS low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -227,15 +228,6 @@ extern "C" { # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif -#if defined(__GNUC__) -# define MEM_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif /*-************************************************************** @@ -268,24 +260,6 @@ extern "C" { /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable.
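Two small robustness fixes sit in the stretch above: ZBUFFv06_createDCtx() now frees the outer context when the inner ZSTDv06_createDCtx() allocation fails, instead of returning a half-initialized object that leaks, and the header-buffering memcpy gains an "ip != NULL" guard because C requires memcpy's pointer arguments to be valid even when the byte count is zero, and a pure flush call may legitimately pass a NULL input pointer. The guard, sketched in isolation (names are illustrative):

    #include <string.h>
    #include <stddef.h>

    /* memcpy(dst, NULL, 0) is undefined behavior per the C standard, so skip
     * the call when the source may be NULL (the count is 0 in that case). */
    static void append_bytes(char* dst, size_t offset, const char* src, size_t n)
    {
        if (src != NULL)
            memcpy(dst + offset, src, n);
    }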
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } @@ -296,33 +270,6 @@ MEM_STATIC unsigned MEM_isLittleEndian(void) return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -343,8 +290,6 @@ MEM_STATIC void MEM_write16(void* memPtr, U16 value) memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -439,7 +384,7 @@ MEM_STATIC size_t MEM_readLEST(const void* memPtr) header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -670,7 +615,7 @@ MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream) Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -978,7 +923,7 @@ MEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStre header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1151,7 +1096,7 @@ size_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1375,7 +1320,7 @@ size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1699,7 +1644,7 @@ size_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2577,7 +2522,7 @@ size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2601,7 +2546,7 @@ size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2647,7 +2592,7 @@ static void ZSTDv07_defaultFreeFunction(void* opaque, void* address) Header File for include Copyright (C) 2014-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2842,9 +2787,9 @@ typedef struct { U32 cachedLitLength; const BYTE* cachedLiterals; ZSTDv07_stats_t stats; -} seqStore_t; +} SeqStore_t; -void ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq); +void ZSTDv07_seqToCodes(const SeqStore_t* seqStorePtr, size_t const nbSeq); /* custom memory allocation functions */ static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL }; @@ -2854,7 +2799,7 @@ static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2878,7 +2823,7 @@ static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ /* *************************************************************** @@ -3599,11 +3544,14 @@ size_t ZSTDv07_execSequence(BYTE* op, const BYTE* match = oLitEnd - sequence.offset; /* check */ - if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ - if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + assert(oend >= op); + if (sequence.litLength + WILDCOPY_OVERLENGTH > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequenceLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + assert(litLimit >= *litPtr); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected);; /* copy Literals */ - ZSTDv07_wildcopy(op, *litPtr, sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + ZSTDv07_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ @@ -3617,7 +3565,7 @@ size_t ZSTDv07_execSequence(BYTE* op, return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; + { size_t const length1 = (size_t)(dictEnd - match); memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; @@ -4059,8 +4007,8 @@ size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapac } dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTDv07_blockHeaderSize; - dctx->previousDstEnd = (char*)dst + rSize; if (ZSTDv07_isError(rSize)) return rSize; + dctx->previousDstEnd = (char*)dst + rSize; if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); return rSize; } @@ -4253,7 +4201,7 @@ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx, Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. 
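The v07 variant of the execSequence check above differs from the older decoders because ZSTDv07_wildcopy is allowed to write up to WILDCOPY_OVERLENGTH bytes past the requested end for speed; the literal-copy bound therefore needs that much slack in the destination on top of the plain sequence-length check. Illustrative shape of the test (the constant's value here is a placeholder, not necessarily v07's, and the function is mine):

    #include <assert.h>
    #include <stddef.h>

    #define TOY_WILDCOPY_OVERLENGTH 8   /* wildcopy may overshoot by this much */

    static int literal_copy_fits(const unsigned char* op, const unsigned char* oend,
                                 size_t litLength, size_t sequenceLength)
    {
        assert(oend >= op);
        if (litLength + TOY_WILDCOPY_OVERLENGTH > (size_t)(oend - op)) return 0;
        if (sequenceLength > (size_t)(oend - op)) return 0;
        return 1;
    }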
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -4277,7 +4225,7 @@ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx, OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -4417,7 +4365,8 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd, if (hSize != 0) { size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ - memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); + if (ip != NULL) + memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize; /* remaining header bytes + next block header */ diff --git a/lib/legacy/zstd_v07.h b/lib/legacy/zstd_v07.h index bc35cfa6a33..1ff39041f88 100644 --- a/lib/legacy/zstd_v07.h +++ b/lib/legacy/zstd_v07.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/libzstd.mk b/lib/libzstd.mk index c97db56286c..60928b0ee95 100644 --- a/lib/libzstd.mk +++ b/lib/libzstd.mk @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -8,28 +8,53 @@ # You may select, at your option, one of the above-listed licenses. # ################################################################ +# This included Makefile provides the following variables : +# LIB_SRCDIR, LIB_BINDIR + +# Ensure the file is not included twice +# Note : must be included after setting the default target +ifndef LIBZSTD_MK_INCLUDED +LIBZSTD_MK_INCLUDED := 1 + ################################################################## # Input Variables ################################################################## -# Zstd lib directory -LIBZSTD ?= ./ +# By default, library's directory is same as this included makefile +LIB_SRCDIR ?= $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) +LIB_BINDIR ?= $(LIB_SRCDIR) -# Legacy support -ZSTD_LEGACY_SUPPORT ?= 5 +# ZSTD_LIB_MINIFY is a helper variable that +# configures a bunch of other variables to space-optimized defaults. 
+ZSTD_LIB_MINIFY ?= 0 + +# Legacy support disabled by default +ZSTD_LEGACY_SUPPORT ?= 0 ZSTD_LEGACY_MULTITHREADED_API ?= 0 # Build size optimizations -HUF_FORCE_DECOMPRESS_X1 ?= 0 -HUF_FORCE_DECOMPRESS_X2 ?= 0 -ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 0 -ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 -ZSTD_NO_INLINE ?= 0 -ZSTD_STRIP_ERROR_STRINGS ?= 0 +ifneq ($(ZSTD_LIB_MINIFY), 0) + HUF_FORCE_DECOMPRESS_X1 ?= 1 + HUF_FORCE_DECOMPRESS_X2 ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 1 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 + ZSTD_NO_INLINE ?= 1 + ZSTD_STRIP_ERROR_STRINGS ?= 1 +else + HUF_FORCE_DECOMPRESS_X1 ?= 0 + HUF_FORCE_DECOMPRESS_X2 ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 + ZSTD_NO_INLINE ?= 0 + ZSTD_STRIP_ERROR_STRINGS ?= 0 +endif # Assembly support ZSTD_NO_ASM ?= 0 +ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP ?= 0 +ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP ?= 0 + ################################################################## # libzstd helpers ################################################################## @@ -40,6 +65,7 @@ VOID ?= /dev/null NUM_SYMBOL := \# # define silent mode as default (verbose mode with V=1 or VERBOSE=1) +# Note : must be defined _after_ the default target $(V)$(VERBOSE).SILENT: # When cross-compiling from linux to windows, @@ -49,7 +75,7 @@ $(V)$(VERBOSE).SILENT: TARGET_SYSTEM ?= $(OS) # Version numbers -LIBVER_SRC := $(LIBZSTD)/zstd.h +LIBVER_SRC := $(LIB_SRCDIR)/zstd.h LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` @@ -61,17 +87,8 @@ LIBVER := $(shell echo $(LIBVER_SCRIPT)) CCVER := $(shell $(CC) --version) ZSTD_VERSION?= $(LIBVER) -# ZSTD_LIB_MINIFY is a helper variable that -# configures a bunch of other variables to space-optimized defaults. -ZSTD_LIB_MINIFY ?= 0 ifneq ($(ZSTD_LIB_MINIFY), 0) HAVE_CC_OZ ?= $(shell echo "" | $(CC) -Oz -x c -c - -o /dev/null 2> /dev/null && echo 1 || echo 0) - ZSTD_LEGACY_SUPPORT ?= 0 - ZSTD_LIB_DEPRECATED ?= 0 - HUF_FORCE_DECOMPRESS_X1 ?= 1 - ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 1 - ZSTD_NO_INLINE ?= 1 - ZSTD_STRIP_ERROR_STRINGS ?= 1 ifneq ($(HAVE_CC_OZ), 0) # Some compilers (clang) support an even more space-optimized setting. 
CFLAGS += -Oz @@ -125,14 +142,14 @@ ifeq ($(HAVE_COLORNEVER), 1) endif GREP = grep $(GREP_OPTIONS) -ZSTD_COMMON_FILES := $(sort $(wildcard $(LIBZSTD)/common/*.c)) -ZSTD_COMPRESS_FILES := $(sort $(wildcard $(LIBZSTD)/compress/*.c)) -ZSTD_DECOMPRESS_FILES := $(sort $(wildcard $(LIBZSTD)/decompress/*.c)) -ZSTD_DICTBUILDER_FILES := $(sort $(wildcard $(LIBZSTD)/dictBuilder/*.c)) -ZSTD_DEPRECATED_FILES := $(sort $(wildcard $(LIBZSTD)/deprecated/*.c)) +ZSTD_COMMON_FILES := $(sort $(wildcard $(LIB_SRCDIR)/common/*.c)) +ZSTD_COMPRESS_FILES := $(sort $(wildcard $(LIB_SRCDIR)/compress/*.c)) +ZSTD_DECOMPRESS_FILES := $(sort $(wildcard $(LIB_SRCDIR)/decompress/*.c)) +ZSTD_DICTBUILDER_FILES := $(sort $(wildcard $(LIB_SRCDIR)/dictBuilder/*.c)) +ZSTD_DEPRECATED_FILES := $(sort $(wildcard $(LIB_SRCDIR)/deprecated/*.c)) ZSTD_LEGACY_FILES := -ZSTD_DECOMPRESS_AMD64_ASM_FILES := $(sort $(wildcard $(LIBZSTD)/decompress/*_amd64.S)) +ZSTD_DECOMPRESS_AMD64_ASM_FILES := $(sort $(wildcard $(LIB_SRCDIR)/decompress/*_amd64.S)) ifneq ($(ZSTD_NO_ASM), 0) CPPFLAGS += -DZSTD_DISABLE_ASM @@ -170,22 +187,31 @@ ifneq ($(ZSTD_LEGACY_MULTITHREADED_API), 0) CFLAGS += -DZSTD_LEGACY_MULTITHREADED_API endif +ifneq ($(ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP), 0) + CFLAGS += -DZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +else +ifneq ($(ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP), 0) + CFLAGS += -DZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR -DZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +endif +endif + ifneq ($(ZSTD_LEGACY_SUPPORT), 0) ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0) - ZSTD_LEGACY_FILES += $(shell ls $(LIBZSTD)/legacy/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') + ZSTD_LEGACY_FILES += $(shell ls $(LIB_SRCDIR)/legacy/*.c | $(GREP) 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') endif endif CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) -UNAME := $(shell uname) +# Include install_oses.mk from the same directory +include $(dir $(lastword $(MAKEFILE_LIST)))/install_oses.mk +LN ?= ln +CP ?= cp -f ifndef BUILD_DIR ifeq ($(UNAME), Darwin) ifeq ($(shell md5 < /dev/null > /dev/null; echo $$?), 0) HASH ?= md5 endif -else ifeq ($(UNAME), FreeBSD) - HASH ?= gmd5sum else ifeq ($(UNAME), NetBSD) HASH ?= md5 -n else ifeq ($(UNAME), OpenBSD) @@ -201,6 +227,8 @@ ifeq ($(HAVE_HASH),0) endif endif # BUILD_DIR -ZSTD_SUBDIR := $(LIBZSTD)/common $(LIBZSTD)/compress $(LIBZSTD)/decompress $(LIBZSTD)/dictBuilder $(LIBZSTD)/legacy $(LIBZSTD)/deprecated +ZSTD_SUBDIR := $(LIB_SRCDIR)/common $(LIB_SRCDIR)/compress $(LIB_SRCDIR)/decompress $(LIB_SRCDIR)/dictBuilder $(LIB_SRCDIR)/legacy $(LIB_SRCDIR)/deprecated vpath %.c $(ZSTD_SUBDIR) vpath %.S $(ZSTD_SUBDIR) + +endif # LIBZSTD_MK_INCLUDED diff --git a/lib/libzstd.pc.in b/lib/libzstd.pc.in index 43ebaec3576..2220a579733 100644 --- a/lib/libzstd.pc.in +++ b/lib/libzstd.pc.in @@ -1,6 +1,6 @@ # ZSTD - standard compression algorithm -# Copyright (C) 2014-2016, Yann Collet, Facebook -# BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +# Copyright (c) Meta Platforms, Inc. and affiliates. 
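The new ZSTD_LIB_EXCLUDE_COMPRESSORS_* switches above shrink the binary by compiling whole block-compressor strategies out; on the C side each switch is just a -D macro gating the corresponding code. A toy rendering of the guard shape (the function bodies are invented; only the macro name comes from the patch):

    #include <stdio.h>

    /* Illustrative only: the real gates live in zstd's compress sources. */
    #ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
    static const char* dfast_status(void) { return "dfast compiled in"; }
    #else
    static const char* dfast_status(void) { return "dfast excluded"; }
    #endif

    int main(void) { puts(dfast_status()); return 0; }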
+# BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) prefix=@PREFIX@ exec_prefix=@EXEC_PREFIX@ @@ -9,8 +9,9 @@ libdir=@LIBDIR@ Name: zstd Description: fast lossless compression algorithm library -URL: http://www.zstd.net/ +URL: https://facebook.github.io/zstd/ Version: @VERSION@ -Libs: -L${libdir} -lzstd +Libs: -L${libdir} -lzstd @LIBS_MT@ Libs.private: @LIBS_PRIVATE@ -Cflags: -I${includedir} +Cflags: -I${includedir} @LIBS_MT@ +License: BSD-3-Clause OR GPL-2.0-only diff --git a/lib/module.modulemap b/lib/module.modulemap index bbb939782e1..e66a210d06b 100644 --- a/lib/module.modulemap +++ b/lib/module.modulemap @@ -1,17 +1,6 @@ module libzstd [extern_c] { header "zstd.h" export * - config_macros [exhaustive] /* zstd.h */ \ - ZSTD_STATIC_LINKING_ONLY, \ - ZSTDLIB_VISIBLE, \ - ZSTD_DLL_EXPORT, \ - ZSTDLIB_STATIC_API, \ - ZSTD_DISABLE_DEPRECATE_WARNINGS, \ - ZSTD_CLEVEL_DEFAULT, \ - /* zdict.h */ ZDICT_STATIC_LINKING_ONLY, \ - ZDICTLIB_VISIBILITY, \ - ZDICT_DISABLE_DEPRECATE_WARNINGS, \ - /* zstd_errors.h */ ZSTDERRORLIB_VISIBILITY module dictbuilder [extern_c] { header "zdict.h" diff --git a/lib/zdict.h b/lib/zdict.h index 8e21ba0f767..599b793013b 100644 --- a/lib/zdict.h +++ b/lib/zdict.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,32 +8,43 @@ * You may select, at your option, one of the above-listed licenses. */ -#ifndef DICTBUILDER_H_001 -#define DICTBUILDER_H_001 - -#if defined (__cplusplus) -extern "C" { -#endif +#ifndef ZSTD_ZDICT_H +#define ZSTD_ZDICT_H /*====== Dependencies ======*/ #include /* size_t */ +#if defined (__cplusplus) +extern "C" { +#endif /* ===== ZDICTLIB_API : control library symbols visibility ===== */ -#ifndef ZDICTLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZDICTLIB_VISIBLE + /* Backwards compatibility with old macro name */ +# ifdef ZDICTLIB_VISIBILITY +# define ZDICTLIB_VISIBLE ZDICTLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZDICTLIB_VISIBLE __attribute__ ((visibility ("default"))) # else -# define ZDICTLIB_VISIBILITY +# define ZDICTLIB_VISIBLE # endif #endif + +#ifndef ZDICTLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZDICTLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else +# define ZDICTLIB_HIDDEN +# endif +#endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY +# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZDICTLIB_API ZDICTLIB_VISIBILITY +# define ZDICTLIB_API ZDICTLIB_VISIBLE #endif /******************************************************************************* @@ -201,9 +212,9 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCap const size_t* samplesSizes, unsigned nbSamples); typedef struct { - int 
compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */ - unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ - unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) + int compressionLevel; /**< optimize for a specific zstd compression level; 0 means default */ + unsigned notificationLevel; /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ + unsigned dictID; /**< force dictID value; 0 means auto mode (32-bits random value) * NOTE: The zstd format reserves some dictionary IDs for future use. * You may use them in private settings, but be warned that they * may be used by zstd in a public dictionary registry in the future. @@ -237,7 +248,7 @@ typedef struct { * is presumed that the most profitable content is at the end of the dictionary, * since that is the cheapest to reference. * - * `maxDictSize` must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN). + * `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN). * * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), * or an error code, which can be tested by ZDICT_isError(). @@ -260,9 +271,29 @@ ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictS ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_ZDICT_H */ +#if defined(ZDICT_STATIC_LINKING_ONLY) && !defined(ZSTD_ZDICT_H_STATIC) +#define ZSTD_ZDICT_H_STATIC + +#if defined (__cplusplus) +extern "C" { +#endif -#ifdef ZDICT_STATIC_LINKING_ONLY +/* This can be overridden externally to hide static symbols. */ +#ifndef ZDICTLIB_STATIC_API +# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZDICTLIB_STATIC_API __declspec(dllexport) ZDICTLIB_VISIBLE +# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZDICTLIB_STATIC_API __declspec(dllimport) ZDICTLIB_VISIBLE +# else +# define ZDICTLIB_STATIC_API ZDICTLIB_VISIBLE +# endif +#endif /* ==================================================================================== * The definitions in this section are considered experimental. @@ -318,7 +349,7 @@ typedef struct { * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters); @@ -340,7 +371,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. 
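With zdict.h now split into a stable section and a ZSTD_ZDICT_H_STATIC section, reaching the experimental trainers requires opting in before inclusion, mirroring zstd.h's ZSTD_STATIC_LINKING_ONLY convention. A hypothetical consumer:

    /* Opt in to the experimental zdict API before including the header; the
     * cover/fastCover trainers are then declared with ZDICTLIB_STATIC_API and
     * carry no stability or export guarantee. */
    #define ZDICT_STATIC_LINKING_ONLY
    #include "zdict.h"
    #include <stdio.h>

    int main(void)
    {
        printf("%u\n", ZDICT_isError((size_t)0));   /* 0 is not an error code */
        return 0;
    }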
*/ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_cover_params_t* parameters); @@ -361,7 +392,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters); @@ -384,7 +415,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t* parameters); @@ -409,7 +440,7 @@ typedef struct { * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_legacy( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters); @@ -421,32 +452,30 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( or _CRT_SECURE_NO_WARNINGS in Visual. 
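To make the training entry points above concrete, here is a minimal sketch of the stable ZDICT_trainFromBuffer() path; the sample buffers and the 16 KB target size are invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <zdict.h>

/* Illustrative sketch: train a 16 KB dictionary from concatenated samples.
 * `samples`, `sampleSizes` and `nbSamples` are assumed prepared by the caller. */
static void* train_dict(const void* samples, const size_t* sampleSizes,
                        unsigned nbSamples, size_t* dictSize)
{
    size_t const dictCapacity = 16 * 1024;
    void* const dictBuffer = malloc(dictCapacity);
    if (dictBuffer == NULL) return NULL;
    {   size_t const r = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
                                               samples, sampleSizes, nbSamples);
        if (ZDICT_isError(r)) {
            fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(r));
            free(dictBuffer);
            return NULL;
        }
        *dictSize = r;   /* actual dictionary size, <= dictCapacity */
    }
    return dictBuffer;
}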
Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS -# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ +# define ZDICT_DEPRECATED(message) /* disable deprecation warnings */ #else # define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API +# define ZDICT_DEPRECATED(message) [[deprecated(message)]] # elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) +# define ZDICT_DEPRECATED(message) __attribute__((deprecated(message))) # elif (ZDICT_GCC_VERSION >= 301) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) +# define ZDICT_DEPRECATED(message) __attribute__((deprecated)) # elif defined(_MSC_VER) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) +# define ZDICT_DEPRECATED(message) __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") -# define ZDICT_DEPRECATED(message) ZDICTLIB_API +# define ZDICT_DEPRECATED(message) # endif #endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") +ZDICTLIB_STATIC_API size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); - -#endif /* ZDICT_STATIC_LINKING_ONLY */ - #if defined (__cplusplus) } #endif -#endif /* DICTBUILDER_H_001 */ +#endif /* ZSTD_ZDICT_H_STATIC */ diff --git a/lib/zstd.h b/lib/zstd.h index 1867efc93ef..97fef316fd6 100644 --- a/lib/zstd.h +++ b/lib/zstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -7,28 +7,43 @@ * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
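The deprecation machinery above can also be silenced per translation unit; a tiny, hypothetical consumer example:

/* Hypothetical consumer file: suppress ZDICT deprecation diagnostics by
 * defining the documented escape hatch before the header is included. */
#define ZDICT_DISABLE_DEPRECATE_WARNINGS
#include <zdict.h>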
*/ -#if defined (__cplusplus) -extern "C" { -#endif #ifndef ZSTD_H_235446 #define ZSTD_H_235446 -/* ====== Dependency ======*/ -#include <limits.h> /* INT_MAX */ + +/* ====== Dependencies ======*/ #include <stddef.h> /* size_t */ +#include "zstd_errors.h" /* list of errors */ +#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) +#include <limits.h> /* INT_MAX */ +#endif /* ZSTD_STATIC_LINKING_ONLY */ + +#if defined (__cplusplus) +extern "C" { +#endif /* ===== ZSTDLIB_API : control library symbols visibility ===== */ #ifndef ZSTDLIB_VISIBLE -# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) + /* Backwards compatibility with old macro name */ +# ifdef ZSTDLIB_VISIBILITY +# define ZSTDLIB_VISIBLE ZSTDLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) # define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default"))) -# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) # else # define ZSTDLIB_VISIBLE +# endif +#endif + +#ifndef ZSTDLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else # define ZSTDLIB_HIDDEN # endif #endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) @@ -47,7 +62,7 @@ extern "C" { #else # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define ZSTD_DEPRECATED(message) [[deprecated(message)]] -# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) +# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) || defined(__IAR_SYSTEMS_ICC__) # define ZSTD_DEPRECATED(message) __attribute__((deprecated(message))) # elif defined(__GNUC__) && (__GNUC__ >= 3) # define ZSTD_DEPRECATED(message) __attribute__((deprecated)) @@ -95,8 +110,8 @@ extern "C" { /*------ Version ------*/ #define ZSTD_VERSION_MAJOR 1 -#define ZSTD_VERSION_MINOR 5 -#define ZSTD_VERSION_RELEASE 3 +#define ZSTD_VERSION_MINOR 6 +#define ZSTD_VERSION_RELEASE 0 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /*! ZSTD_versionNumber() : @@ -134,11 +149,12 @@ ZSTDLIB_API const char* ZSTD_versionString(void); /*************************************** -* Simple API +* Simple Core API ***************************************/ /*! ZSTD_compress() : * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. - * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data. * @return : compressed size written into `dst` (<= `dstCapacity`), * or an error code if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, @@ -146,68 +162,106 @@ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, int compressionLevel); /*! ZSTD_decompress() : - * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. - * `dstCapacity` is an upper bound of originalSize to regenerate. - * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
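Since the version macros above changed in this release, a consumer may want to assert that the header and the linked library agree; a small sketch:

#include <stdio.h>
#include <zstd.h>

/* Sketch: verify at run time that the linked library matches the header
 * this program was compiled against. */
int check_zstd_version(void)
{
    unsigned const runtime = ZSTD_versionNumber();
    if (runtime != (unsigned)ZSTD_VERSION_NUMBER) {
        fprintf(stderr, "zstd header %u vs library %u\n",
                (unsigned)ZSTD_VERSION_NUMBER, runtime);
        return 1;
    }
    return 0;
}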
- * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), - * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ + * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. + * Multiple compressed frames can be decompressed at once with this method. + * The result will be the concatenation of all decompressed frames, back to back. + * `dstCapacity` is an upper bound of originalSize to regenerate. + * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize(). + * If maximum upper bound isn't known, prefer using streaming mode to decompress data. + * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), + * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity, const void* src, size_t compressedSize); -/*! ZSTD_getFrameContentSize() : requires v1.3.0+ - * `src` should point to the start of a ZSTD encoded frame. - * `srcSize` must be at least as large as the frame header. - * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. - * @return : - decompressed size of `src` frame content, if known - * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined - * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) - * note 1 : a 0 return value means the frame is valid but "empty". - * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode. - * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. - * In which case, it's necessary to use streaming mode to decompress data. - * Optionally, application can rely on some implicit limit, - * as ZSTD_decompress() only needs an upper bound of decompressed size. - * (For example, data could be necessarily cut into blocks <= 16 KB). - * note 3 : decompressed size is always present when compression is completed using single-pass functions, - * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). - * note 4 : decompressed size can be very large (64-bits value), - * potentially larger than what local system can handle as a single memory segment. - * In which case, it's necessary to use streaming mode to decompress data. - * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. - * Always ensure return value fits within application's authorized limits. - * Each application can set its own limits. - * note 6 : This function replaces ZSTD_getDecompressedSize() */ + +/*====== Decompression helper functions ======*/ + +/*! @brief Returns the decompressed content size stored in a ZSTD frame header. + * + * @since v1.3.0 + * + * @param src Pointer to the beginning of a ZSTD encoded frame. + * @param srcSize Size of the buffer pointed to by @p src. It must be at least as large as the frame header. + * Any value greater than or equal to `ZSTD_frameHeaderSize_max` is sufficient. + * @return The decompressed size in bytes when the value is available in the frame header. + * @retval ZSTD_CONTENTSIZE_UNKNOWN The frame does not encode a decompressed size (typical for streaming). + * @retval ZSTD_CONTENTSIZE_ERROR An error occurred (e.g. invalid magic number, @p srcSize too small). + * + * @note The return value is not compatible with `ZSTD_isError()`. + * @note A return value of 0 denotes a valid but empty frame. Skippable frames also report 0. 
+ * @note The decompressed size field is optional. When it is absent (the function returns @c ZSTD_CONTENTSIZE_UNKNOWN), + * the caller must rely on streaming decompression or an application-specific upper bound. `ZSTD_decompress()` + * only requires an upper bound, so applications may enforce their own block limits (for example 16 KB). + * @note The decompressed size is guaranteed to be present when compression was performed with single-pass APIs such as + * `ZSTD_compress()`, `ZSTD_compressCCtx()`, `ZSTD_compress_usingDict()`, or `ZSTD_compress_usingCDict()`. + * @note The decompressed size is a 64-bit value and may exceed the addressable space of the system. Use streaming + * decompression when the value is too large to materialize in contiguous memory. + * @warning When processing untrusted input, validate the returned size against the application's limits; attackers may + * forge an arbitrarily large value. + * @note This function replaces `ZSTD_getDecompressedSize()`. + */ #define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) #define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); -/*! ZSTD_getDecompressedSize() : - * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). +/*! ZSTD_getDecompressedSize() (obsolete): + * This function is now obsolete, in favor of ZSTD_getFrameContentSize(). * Both functions work the same way, but ZSTD_getDecompressedSize() blends * "empty", "unknown" and "error" results to the same return value (0), * while ZSTD_getFrameContentSize() gives them separate return values. * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize") -ZSTDLIB_API -unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); +ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); /*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+ * `src` should point to the start of a ZSTD frame or skippable frame. * `srcSize` must be >= first frame size * @return : the compressed size of the first frame starting at `src`, * suitable to pass as `srcSize` to `ZSTD_decompress` or similar, - * or an error code if input is invalid */ + * or an error code if input is invalid + * Note 1: this method is called _find*() because it's not enough to read the header, + * it may have to scan through the frame's content, to reach its end. + * Note 2: this method also works with Skippable Frames. In which case, + * it returns the size of the complete skippable frame, + * which is always equal to its content size + 8 bytes for headers. */ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize); -/*====== Helper functions ======*/ -#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? 
(((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ -ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ -ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ -ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ -ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ -ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ -ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */ +/*====== Compression helper functions ======*/ + +/*! ZSTD_compressBound() : + * maximum compressed size in worst case single-pass scenario. + * When invoking `ZSTD_compress()`, or any other one-pass compression function, + * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize) + * as it eliminates one potential failure scenario, + * aka not enough room in dst buffer to write the compressed frame. + * Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE . + * In which case, ZSTD_compressBound() will return an error code + * which can be tested using ZSTD_isError(). + * + * ZSTD_COMPRESSBOUND() : + * same as ZSTD_compressBound(), but as a macro. + * It can be used to produce constants, which can be useful for static allocation, + * for example to size a static array on stack. + * Will produce constant value 0 if srcSize is too large. + */ +#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U) +#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ +ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ + + +/*====== Error helper functions ======*/ +/* ZSTD_isError() : + * Most ZSTD_* functions returning a size_t value can be tested for error, + * using ZSTD_isError(). + * @return 1 if error, 0 otherwise + */ +ZSTDLIB_API unsigned ZSTD_isError(size_t result); /*!< tells if a `size_t` function result is an error code */ +ZSTDLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */ +ZSTDLIB_API const char* ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */ +ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ +ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ +ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */ /*************************************** @@ -215,25 +269,25 @@ ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compres ***************************************/ /*= Compression context * When compressing many times, - * it is recommended to allocate a context just once, - * and re-use it for each successive compression operation. - * This will make workload friendlier for system's memory. 
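Putting the helpers above together, here is a hedged sketch of the recommended pattern for untrusted input: validate the declared frame content size against an application limit (the 1 GB cap is invented for the example), then decompress in one shot:

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Sketch: query the frame's declared content size, reject unknown or
 * oversized values, then decompress in a single pass. */
static void* decompress_frame(const void* src, size_t srcSize, size_t* outSize)
{
    unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
    if (contentSize == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* not a zstd frame */
    if (contentSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* use streaming instead */
    if (contentSize > (1ULL << 30)) return NULL;               /* invented app limit */
    {   void* const dst = malloc((size_t)contentSize);
        if (dst == NULL) return NULL;
        {   size_t const r = ZSTD_decompress(dst, (size_t)contentSize, src, srcSize);
            if (ZSTD_isError(r)) {
                fprintf(stderr, "decompression failed: %s\n", ZSTD_getErrorName(r));
                free(dst);
                return NULL;
            }
            *outSize = r;
        }
        return dst;
    }
}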
+ * it is recommended to allocate a compression context just once, + * and reuse it for each successive compression operation. + * This will make the workload friendlier for the system's memory. * Note : re-using context is just a speed / resource optimization. * It doesn't change the compression ratio, which remains identical. - * Note 2 : In multi-threaded environments, - * use one different context per thread for parallel execution. + * Note 2: For parallel execution in multi-threaded environments, + * use one different context per thread. */ typedef struct ZSTD_CCtx_s ZSTD_CCtx; ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void); -ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */ +ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* compatible with NULL pointer */ /*! ZSTD_compressCCtx() : * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. - * Important : in order to behave similarly to `ZSTD_compress()`, - * this function compresses at requested compression level, - * __ignoring any other parameter__ . + * Important : in order to mirror `ZSTD_compress()` behavior, + * this function compresses at the requested compression level, + * __ignoring any other advanced parameter__ . * If any advanced parameter was set using the advanced API, - * they will all be reset. Only `compressionLevel` remains. + * they will all be reset. Only @compressionLevel remains. */ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, @@ -243,7 +297,7 @@ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, /*= Decompression context * When decompressing many times, * it is recommended to allocate a context only once, - * and re-use it for each successive compression operation. + * and reuse it for each successive decompression operation. * This will make workload friendlier for system's memory. * Use one context per thread for parallel execution. */ typedef struct ZSTD_DCtx_s ZSTD_DCtx; @@ -253,7 +307,7 @@ ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer * /*! ZSTD_decompressDCtx() : * Same as ZSTD_decompress(), * requires an allocated ZSTD_DCtx. - * Compatible with sticky parameters. + * Compatible with sticky parameters (see below). */ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, @@ -269,12 +323,12 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, * using ZSTD_CCtx_set*() functions. * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame. * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` ! - * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ . + * __They do not apply to one-shot variants such as ZSTD_compressCCtx()__ . * * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset(). * * This API supersedes all other "advanced" API entry points in the experimental section. - * In the future, we expect to remove from experimental API entry points which are redundant with this API. + * In the future, we expect to remove API entry points from experimental which are redundant with this API. */ @@ -357,6 +411,19 @@ typedef enum { * The higher the value of selected strategy, the more complex it is, * resulting in stronger and slower compression. * Special: value 0 means "use default strategy". */ + + ZSTD_c_targetCBlockSize=130, /* v1.5.6+ + * Attempts to fit compressed block size into approximately targetCBlockSize.
+ * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX. + * Note that it's not a guarantee, just a convergence target (default:0). + * No target when targetCBlockSize == 0. + * This is helpful in low bandwidth streaming environments to improve end-to-end latency, + * when a client can make use of partial documents (a prominent example being Chrome). + * Note: this parameter is stable since v1.5.6. + * It was present as an experimental parameter in earlier versions, + * but it's not recommended using it with earlier library versions + * due to massive performance regressions. + */ /* LDM mode parameters */ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching. * This parameter is designed to improve compression ratio @@ -436,16 +503,18 @@ typedef enum { * ZSTD_c_forceMaxWindow * ZSTD_c_forceAttachDict * ZSTD_c_literalCompressionMode - * ZSTD_c_targetCBlockSize * ZSTD_c_srcSizeHint * ZSTD_c_enableDedicatedDictSearch * ZSTD_c_stableInBuffer * ZSTD_c_stableOutBuffer * ZSTD_c_blockDelimiters * ZSTD_c_validateSequences - * ZSTD_c_useBlockSplitter + * ZSTD_c_blockSplitterLevel + * ZSTD_c_splitAfterSequences * ZSTD_c_useRowMatchFinder * ZSTD_c_prefetchCDictTables + * ZSTD_c_enableSeqProducerFallback + * ZSTD_c_maxBlockSize * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly; * also, the enums values themselves are unstable and can still change. @@ -455,7 +524,7 @@ typedef enum { ZSTD_c_experimentalParam3=1000, ZSTD_c_experimentalParam4=1001, ZSTD_c_experimentalParam5=1002, - ZSTD_c_experimentalParam6=1003, + /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */ ZSTD_c_experimentalParam7=1004, ZSTD_c_experimentalParam8=1005, ZSTD_c_experimentalParam9=1006, @@ -465,7 +534,11 @@ typedef enum { ZSTD_c_experimentalParam13=1010, ZSTD_c_experimentalParam14=1011, ZSTD_c_experimentalParam15=1012, - ZSTD_c_experimentalParam16=1013 + ZSTD_c_experimentalParam16=1013, + ZSTD_c_experimentalParam17=1014, + ZSTD_c_experimentalParam18=1015, + ZSTD_c_experimentalParam19=1016, + ZSTD_c_experimentalParam20=1017 } ZSTD_cParameter; typedef struct { @@ -528,7 +601,7 @@ typedef enum { * They will be used to compress next frame. * Resetting session never fails. * - The parameters : changes all parameters back to "default". - * This removes any reference to any dictionary too. + * This also removes any reference to any dictionary or external sequence producer. * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) * - Both : similar to resetting the session, followed by resetting parameters. @@ -537,11 +610,13 @@ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset); /*! ZSTD_compress2() : * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. + * (note that this entry point doesn't even expose a compression level parameter). * ZSTD_compress2() always starts a new frame. * Should cctx hold data from a previously unfinished frame, everything about it is forgotten. * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() * - The function is always blocking, returns when compression is completed. - * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. 
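As an illustration of the context-reuse advice above, a sketch that compresses several independent inputs through a single ZSTD_CCtx, sizing each destination with ZSTD_compressBound(); the batch layout is invented for the example:

#include <stdlib.h>
#include <zstd.h>

/* Sketch: one context serves many one-shot compressions.
 * Returns 0 on success, -1 on any failure (simplified cleanup). */
static int compress_all(const void* const* srcs, const size_t* srcSizes,
                        void** dsts, size_t* dstSizes, size_t n, int level)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return -1;
    for (size_t i = 0; i < n; i++) {
        size_t const bound = ZSTD_compressBound(srcSizes[i]);
        if (ZSTD_isError(bound)) { ZSTD_freeCCtx(cctx); return -1; }
        dsts[i] = malloc(bound);
        if (dsts[i] == NULL) { ZSTD_freeCCtx(cctx); return -1; }
        /* compressCCtx ignores sticky parameters; it compresses at `level` */
        dstSizes[i] = ZSTD_compressCCtx(cctx, dsts[i], bound,
                                        srcs[i], srcSizes[i], level);
        if (ZSTD_isError(dstSizes[i])) { ZSTD_freeCCtx(cctx); return -1; }
    }
    ZSTD_freeCCtx(cctx);
    return 0;
}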
+ * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data, though it is possible it fails for other reasons. * @return : compressed size written into `dst` (<= `dstCapacity`), * or an error code if it fails (which can be tested using ZSTD_isError()). */ @@ -578,13 +653,17 @@ typedef enum { * ZSTD_d_stableOutBuffer * ZSTD_d_forceIgnoreChecksum * ZSTD_d_refMultipleDDicts + * ZSTD_d_disableHuffmanAssembly + * ZSTD_d_maxBlockSize * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly */ ZSTD_d_experimentalParam1=1000, ZSTD_d_experimentalParam2=1001, ZSTD_d_experimentalParam3=1002, - ZSTD_d_experimentalParam4=1003 + ZSTD_d_experimentalParam4=1003, + ZSTD_d_experimentalParam5=1004, + ZSTD_d_experimentalParam6=1005 } ZSTD_dParameter; @@ -639,14 +718,14 @@ typedef struct ZSTD_outBuffer_s { * A ZSTD_CStream object is required to track streaming operation. * Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources. * ZSTD_CStream objects can be reused multiple times on consecutive compression operations. -* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory. +* It is recommended to reuse ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory. * * For parallel execution, use one separate ZSTD_CStream per thread. * * note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing. * * Parameters are sticky : when starting a new compression on the same context, -* it will re-use the same sticky parameters as previous compression session. +* it will reuse the same sticky parameters as previous compression session. * When in doubt, it's recommended to fully initialize the context before usage. * Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(), * ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to @@ -735,6 +814,11 @@ typedef enum { * only ZSTD_e_end or ZSTD_e_flush operations are allowed. * Before starting a new compression job, or changing compression parameters, * it is required to fully flush internal buffers. + * - note: if an operation ends with an error, it may leave @cctx in an undefined state. + * Therefore, it's UB to invoke ZSTD_compressStream2() or ZSTD_compressStream() on such a state. + * In order to be re-employed after an error, a state must be reset, + * which can be done explicitly (ZSTD_CCtx_reset()), + * or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx()) */ ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_outBuffer* output, @@ -763,8 +847,6 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output * The following is a legacy streaming API, available since v1.0+ . * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). * It is redundant, but remains fully supported. - * Streaming in combination with advanced parameters and dictionary compression - * can only be used through the new API. ******************************************************************************/ /*!
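A sketch of the sticky-parameter streaming flow documented above, modeled on zstd's own streaming examples; the 64 KB buffers are arbitrary and the ZSTD_CCtx_setParameter() return codes are left unchecked for brevity:

#include <stdio.h>
#include <zstd.h>

/* Sketch: set sticky parameters once, then drive ZSTD_compressStream2()
 * until the ZSTD_e_end flush reports completion (remaining == 0). */
static int stream_compress(ZSTD_CCtx* cctx, FILE* fin, FILE* fout)
{
    char inBuf[1 << 16];
    char outBuf[1 << 16];
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 5);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    for (;;) {
        size_t const readSz = fread(inBuf, 1, sizeof(inBuf), fin);
        int const lastChunk = (readSz < sizeof(inBuf));
        ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
        ZSTD_inBuffer input = { inBuf, readSz, 0 };
        int finished;
        do {
            ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
            if (ZSTD_isError(remaining)) return -1;
            fwrite(outBuf, 1, output.pos, fout);
            /* with ZSTD_e_end, loop until the frame is fully flushed */
            finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
        } while (!finished);
        if (lastChunk) break;
    }
    return 0;
}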
@@ -773,6 +855,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * + * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API + * to compress with a dictionary. */ ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); /*! @@ -793,7 +878,7 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); * * A ZSTD_DStream object is required to track streaming operations. * Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources. -* ZSTD_DStream objects can be re-used multiple times. +* ZSTD_DStream objects can be re-employed multiple times. * * Use ZSTD_initDStream() to start a new decompression operation. * @return : recommended first input size @@ -803,16 +888,21 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); * The function will update both `pos` fields. * If `input.pos < input.size`, some input has not been consumed. * It's up to the caller to present again remaining data. +* * The function tries to flush all data decoded immediately, respecting output buffer size. * If `output.pos < output.size`, decoder has flushed everything it could. -* But if `output.pos == output.size`, there might be some data left within internal buffers., +* +* However, when `output.pos == output.size`, it's more difficult to know. +* If @return > 0, the frame is not complete, meaning +* either there is still some data left to flush within internal buffers, +* or there is more input to read to complete the frame (or both). * In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer. * Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX. * @return : 0 when a frame is completely decoded and fully flushed, * or an error code, which can be tested using ZSTD_isError(), * or any other value > 0, which means there is still some decoding or flushing to do to complete current frame : * the return value is a suggested next input size (just a hint for better latency) -* that will never request more than the remaining frame size. +* that will never request more than the remaining content of the compressed frame. * *******************************************************************************/ typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */ @@ -839,14 +929,21 @@ ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds); * Function will update both input and output `pos` fields exposing current state via these fields: * - `input.pos < input.size`, some input remaining and caller should provide remaining input * on the next call. - * - `output.pos < output.size`, decoder finished and flushed all remaining buffers. - * - `output.pos == output.size`, potentially uncflushed data present in the internal buffers, - * call ZSTD_decompressStream() again to flush remaining data to output. + * - `output.pos < output.size`, decoder flushed internal output buffer. + * - `output.pos == output.size`, unflushed data potentially present in the internal buffers, + * check ZSTD_decompressStream() @return value, + * if > 0, invoke it again to flush remaining data to output. * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. 
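The @return convention above translates into the following loop, a condensed variant of the canonical streaming-decompression example; a non-zero value left over at end-of-input signals a truncated frame:

#include <stdio.h>
#include <zstd.h>

/* Sketch: keep calling ZSTD_decompressStream() while input remains;
 * a final return value of 0 means the last frame completed cleanly. */
static int stream_decompress(FILE* fin, FILE* fout)
{
    char inBuf[1 << 16];
    char outBuf[1 << 17];
    ZSTD_DStream* const zds = ZSTD_createDStream();
    size_t lastRet = 0;
    if (zds == NULL) return -1;
    ZSTD_initDStream(zds);
    for (;;) {
        size_t const readSz = fread(inBuf, 1, sizeof(inBuf), fin);
        ZSTD_inBuffer input = { inBuf, readSz, 0 };
        if (readSz == 0) break;
        while (input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
            size_t const ret = ZSTD_decompressStream(zds, &output, &input);
            if (ZSTD_isError(ret)) { ZSTD_freeDStream(zds); return -1; }
            fwrite(outBuf, 1, output.pos, fout);
            lastRet = ret;
        }
    }
    ZSTD_freeDStream(zds);
    return (lastRet == 0) ? 0 : -1;   /* non-zero: frame was truncated */
}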
* * @return : 0 when a frame is completely decoded and fully flushed, * or an error code, which can be tested using ZSTD_isError(), * or any other value > 0, which means there is some decoding or flushing to do to complete current frame. + * + * Note: when an operation returns with an error code, the @zds state may be left in undefined state. + * It's UB to invoke `ZSTD_decompressStream()` on such a state. + * In order to re-use such a state, it must be first reset, + * which can be done explicitly (`ZSTD_DCtx_reset()`), + * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`) */ ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); @@ -978,9 +1075,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * Advanced dictionary and prefix API (Requires v1.4.0+) * * This API allows dictionaries to be used with ZSTD_compress2(), - * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and - * only reset with the context is reset with ZSTD_reset_parameters or - * ZSTD_reset_session_and_parameters. Prefixes are single-use. + * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). + * Dictionaries are sticky, they remain valid when same context is reused, + * they only reset when the context is reset + * with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters. + * In contrast, Prefixes are single-use. ******************************************************************************/ @@ -990,8 +1089,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, * meaning "return to no-dictionary mode". - * Note 1 : Dictionary is sticky, it will be used for all future compressed frames. - * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). + * Note 1 : Dictionary is sticky, it will be used for all future compressed frames, + * until parameters are reset, a new dictionary is loaded, or the dictionary + * is explicitly invalidated by loading a NULL dictionary. * Note 2 : Loading a dictionary involves building tables. * It's also a CPU consuming operation, with non-negligible impact on latency. * Tables are dependent on compression parameters, and for this reason, @@ -1000,11 +1100,15 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. * In such a case, dictionary buffer must outlive its users. * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() - * to precisely select how dictionary content must be interpreted. */ + * to precisely select how dictionary content must be interpreted. + * Note 5 : This method does not benefit from LDM (long distance mode). + * If you want to employ LDM on some large dictionary content, + * prefer employing ZSTD_CCtx_refPrefix() described below. + */ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); /*! ZSTD_CCtx_refCDict() : Requires v1.4.0+ - * Reference a prepared dictionary, to be used for all next compressed frames. + * Reference a prepared dictionary, to be used for all future compressed frames. 
* Note that compression parameters are enforced from within CDict, * and supersede any compression parameter previously set within CCtx. * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. @@ -1023,6 +1127,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); * Decompression will need same prefix to properly regenerate data. * Compressing with a prefix is similar in outcome as performing a diff and compressing it, * but performs much faster, especially during decompression (compression speed is tunable with compression level). + * This method is compatible with LDM (long distance mode). * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary * Note 1 : Prefix buffer is referenced. It **must** outlive compression. @@ -1039,9 +1144,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize); /*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ - * Create an internal DDict from dict buffer, - * to be used to decompress next frames. - * The dictionary remains valid for all future frames, until explicitly invalidated. + * Create an internal DDict from dict buffer, to be used to decompress all future frames. + * The dictionary remains valid for all future frames, until explicitly invalidated, or + * a new dictionary is loaded. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, * meaning "return to no-dictionary mode". @@ -1065,9 +1170,10 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s * The memory for the table is allocated on the first call to refDDict, and can be * freed with ZSTD_freeDCtx(). * + * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + * will be managed, and referencing a dictionary effectively "discards" any previous one. + * * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. * Special: referencing a NULL DDict means "return to no-dictionary mode". * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. */ @@ -1104,6 +1210,10 @@ ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds); ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict); ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); +#if defined (__cplusplus) +} +#endif + #endif /* ZSTD_H_235446 */ @@ -1119,6 +1229,10 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) #define ZSTD_H_ZSTD_STATIC_LINKING_ONLY +#if defined (__cplusplus) +extern "C" { +#endif + /* This can be overridden externally to hide static symbols. */ #ifndef ZSTDLIB_STATIC_API # if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) @@ -1164,6 +1278,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */ #define ZSTD_STRATEGY_MIN ZSTD_fast #define ZSTD_STRATEGY_MAX ZSTD_btultra2 +#define ZSTD_BLOCKSIZE_MAX_MIN (1 << 10) /* The minimum valid max blocksize. Maximum blocksizes smaller than this make compressBound() inaccurate. 
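A minimal sketch of the sticky dictionary API described above, loading the same raw dictionary content on both the compression and decompression side; once loaded, the dictionary stays attached until reset or replaced:

#include <zstd.h>

/* Sketch: compress with a raw dictionary via the advanced API. */
static size_t compress_with_dict(ZSTD_CCtx* cctx,
                                 void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize)
{
    size_t const r = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
    if (ZSTD_isError(r)) return r;
    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
}

/* Sketch: decompression must load the same dictionary. */
static size_t decompress_with_dict(ZSTD_DCtx* dctx,
                                   void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize,
                                   const void* dict, size_t dictSize)
{
    size_t const r = ZSTD_DCtx_loadDictionary(dctx, dict, dictSize);
    if (ZSTD_isError(r)) return r;
    return ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
}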
*/ #define ZSTD_OVERLAPLOG_MIN 0 @@ -1187,7 +1302,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) /* Advanced parameter bounds */ -#define ZSTD_TARGETCBLOCKSIZE_MIN 64 +#define ZSTD_TARGETCBLOCKSIZE_MIN 1340 /* suitable to fit into an ethernet / wifi / 4G transport frame */ #define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX #define ZSTD_SRCSIZEHINT_MIN 0 #define ZSTD_SRCSIZEHINT_MAX INT_MAX @@ -1229,7 +1344,7 @@ typedef struct { * * Note: This field is optional. ZSTD_generateSequences() will calculate the value of * 'rep', but repeat offsets do not necessarily need to be calculated from an external - * sequence provider's perspective. For example, ZSTD_compressSequences() does not + * sequence provider perspective. For example, ZSTD_compressSequences() does not * use this 'rep' field at all (as of now). */ } ZSTD_Sequence; @@ -1334,17 +1449,18 @@ typedef enum { } ZSTD_literalCompressionMode_e; typedef enum { - /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final - * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable - * or ZSTD_ps_disable allow for a force enable/disable the feature. + /* Note: This enum controls features which are conditionally beneficial. + * Zstd can take a decision on whether or not to enable the feature (ZSTD_ps_auto), + * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable force enable/disable the feature. */ ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */ ZSTD_ps_enable = 1, /* Force-enable the feature */ ZSTD_ps_disable = 2 /* Do not use the feature */ -} ZSTD_paramSwitch_e; +} ZSTD_ParamSwitch_e; +#define ZSTD_paramSwitch_e ZSTD_ParamSwitch_e /* old name */ /*************************************** -* Frame size functions +* Frame header and size functions ***************************************/ /*! ZSTD_findDecompressedSize() : @@ -1386,15 +1502,87 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); /*! ZSTD_frameHeaderSize() : - * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX. + * srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX. * @return : size of the Frame Header, * or an error code (if srcSize is too small) */ ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); +typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e; +#define ZSTD_frameType_e ZSTD_FrameType_e /* old name */ +typedef struct { + unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ + unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ + unsigned blockSizeMax; + ZSTD_FrameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ + unsigned headerSize; + unsigned dictID; /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */ + unsigned checksumFlag; + unsigned _reserved1; + unsigned _reserved2; +} ZSTD_FrameHeader; +#define ZSTD_frameHeader ZSTD_FrameHeader /* old name */ + +/*! ZSTD_getFrameHeader() : + * decode Frame Header into `zfhPtr`, or requires larger `srcSize`. 
+ * @return : 0 => header is complete, `zfhPtr` is correctly filled, + * >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount, `zfhPtr` is not filled, + * or an error code, which can be tested using ZSTD_isError() */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize); +/*! ZSTD_getFrameHeader_advanced() : + * same as ZSTD_getFrameHeader(), + * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); + +/*! ZSTD_decompressionMargin() : + * Zstd supports in-place decompression, where the input and output buffers overlap. + * In this case, the output buffer must be at least (Margin + Output_Size) bytes large, + * and the input buffer must be at the end of the output buffer. + * + * _______________________ Output Buffer ________________________ + * | | + * | ____ Input Buffer ____| + * | | | + * v v v + * |---------------------------------------|-----------|----------| + * ^ ^ ^ + * |___________________ Output_Size ___________________|_ Margin _| + * + * NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). + * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or + * ZSTD_decompressDCtx(). + * NOTE: This function supports multi-frame input. + * + * @param src The compressed frame(s) + * @param srcSize The size of the compressed frame(s) + * @returns The decompression margin or an error that can be checked with ZSTD_isError(). + */ +ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize); + +/*! ZSTD_DECOMPRESS_MARGIN() : + * Similar to ZSTD_decompressionMargin(), but instead of computing the margin from + * the compressed frame, compute it from the original size and the blockSizeLog. + * See ZSTD_decompressionMargin() for details. + * + * WARNING: This macro does not support multi-frame input, the input must be a single + * zstd frame. If you need that support use the function, or implement it yourself. + * + * @param originalSize The original uncompressed size of the data. + * @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX). + * Unless you explicitly set the windowLog smaller than + * ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX. + */ +#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)( \ + ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \ + 4 /* checksum */ + \ + ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \ + (blockSize) /* One block of margin */ \ + )) + typedef enum { - ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */ - ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */ -} ZSTD_sequenceFormat_e; + ZSTD_sf_noBlockDelimiters = 0, /* ZSTD_Sequence[] has no block delimiters, just sequences */ + ZSTD_sf_explicitBlockDelimiters = 1 /* ZSTD_Sequence[] contains explicit block delimiters */ +} ZSTD_SequenceFormat_e; +#define ZSTD_sequenceFormat_e ZSTD_SequenceFormat_e /* old name */ /*! ZSTD_sequenceBound() : * `srcSize` : size of the input buffer @@ -1406,25 +1594,38 @@ typedef enum { ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize); /*! ZSTD_generateSequences() : + * WARNING: This function is meant for debugging and informational purposes ONLY! 
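A hedged sketch of the in-place layout drawn above: the compressed input is copied to the tail of a single allocation of contentSize + margin bytes, then decompressed toward its front. It assumes the frame declares its content size; the exact layout rules are subtle, so treat this as illustrative only:

#include <stdlib.h>
#include <string.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressionMargin() is static-API */
#include <zstd.h>

/* Sketch: single-buffer (in-place) decompression per the diagram above. */
static void* decompress_in_place(const void* src, size_t srcSize, size_t* outSize)
{
    unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
    size_t const margin = ZSTD_decompressionMargin(src, srcSize);
    if (contentSize == ZSTD_CONTENTSIZE_ERROR) return NULL;
    if (contentSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;
    if (ZSTD_isError(margin)) return NULL;
    {   size_t const bufSize = (size_t)contentSize + margin;
        char* buf;
        if (bufSize < srcSize) return NULL;   /* defensive; should not happen */
        buf = malloc(bufSize);
        if (buf == NULL) return NULL;
        memcpy(buf + bufSize - srcSize, src, srcSize);   /* input at the end */
        {   size_t const r = ZSTD_decompress(buf, bufSize,
                                             buf + bufSize - srcSize, srcSize);
            if (ZSTD_isError(r)) { free(buf); return NULL; }
            *outSize = r;   /* decompressed bytes sit at the front of buf */
        }
        return buf;
    }
}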
+ * Its implementation is flawed, and it will be deleted in a future version. + * It is not guaranteed to succeed, as there are several cases where it will give + * up and fail. You should NOT use this function in production code. + * + * This function is deprecated, and will be removed in a future version. + * * Generate sequences using ZSTD_compress2(), given a source buffer. * + * @param zc The compression context to be used for ZSTD_compress2(). Set any + * compression parameters you need on this context. + * @param outSeqs The output sequences buffer of size @p outSeqsSize + * @param outSeqsCapacity The size of the output sequences buffer. + * ZSTD_sequenceBound(srcSize) is an upper bound on the number + * of sequences that can be generated. + * @param src The source buffer to generate sequences from of size @p srcSize. + * @param srcSize The size of the source buffer. + * * Each block will end with a dummy sequence * with offset == 0, matchLength == 0, and litLength == length of last literals. * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) * simply acts as a block delimiter. * - * @zc can be used to insert custom compression params. - * This function invokes ZSTD_compress2(). - * - * The output of this function can be fed into ZSTD_compressSequences() with CCtx - * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters - * @return : number of sequences generated + * @returns The number of sequences generated, necessarily less than + * ZSTD_sequenceBound(srcSize), or an error code that can be checked + * with ZSTD_isError(). */ - +ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()") ZSTDLIB_STATIC_API size_t -ZSTD_generateSequences( ZSTD_CCtx* zc, - ZSTD_Sequence* outSeqs, size_t outSeqsSize, - const void* src, size_t srcSize); +ZSTD_generateSequences(ZSTD_CCtx* zc, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize); /*! ZSTD_mergeBlockDelimiters() : * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals @@ -1443,7 +1644,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. * @src contains the entire input (not just the literals). * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals - * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.) + * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.). * The entire source is compressed into a single frame. * * The compression behavior changes based on cctx params. In particular: @@ -1452,11 +1653,17 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si * the block size derived from the cctx, and sequences may be split. This is the default setting. * * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain - * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided. + * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided. * - * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined - * behavior. 
If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for - * specifics regarding offset/matchlength requirements) then the function will bail out and return an error. + * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes + * using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit + * can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation. + * By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10). + * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction. + * + * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined + * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for + * specifics regarding offset/matchlength requirements) and then bail out and return an error. * * In addition to the two adjustable experimental params, there are other important cctx params. * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN. @@ -1464,15 +1671,42 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md * - * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused. - * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly, - * and cannot emit an RLE block that disagrees with the repcode history + * Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused. + * Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly, + * and cannot emit an RLE block that disagrees with the repcode history. * @return : final compressed size, or a ZSTD error code. */ ZSTDLIB_STATIC_API size_t -ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize); +ZSTD_compressSequences(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* src, size_t srcSize); + + +/*! ZSTD_compressSequencesAndLiterals() : + * This is a variant of ZSTD_compressSequences() which, + * instead of receiving (src,srcSize) as input parameter, receives (literals,litSize), + * aka all the literals, already extracted and laid out into a single continuous buffer. + * This can be useful if the process generating the sequences also happens to generate the buffer of literals, + * thus skipping an extraction + caching stage. 
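For orientation only, a rough sketch of the sequence-level pipeline: extract sequences with the deprecated, debug-only ZSTD_generateSequences(), then re-encode them through ZSTD_compressSequences() in explicit-delimiter mode. Per the warning above, this is not production code; error handling is trimmed:

#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* sequence APIs live in the static section */
#include <zstd.h>

/* Sketch: sequence round trip, debugging/informational use only. */
static size_t roundtrip_sequences(ZSTD_CCtx* cctx,
                                  void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize)
{
    size_t const maxSeqs = ZSTD_sequenceBound(srcSize);
    ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
    size_t nbSeqs, r;
    if (seqs == NULL) return (size_t)-1;   /* caller treats as error */
    nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
    if (ZSTD_isError(nbSeqs)) { free(seqs); return nbSeqs; }
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    /* generateSequences output carries explicit block delimiters */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters,
                           ZSTD_sf_explicitBlockDelimiters);
    r = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
    free(seqs);
    return r;
}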
+ * It's a speed optimization, useful when the right conditions are met, + * but it also features the following limitations: + * - Only supports explicit delimiter mode + * - Currently does not support Sequences validation (so input Sequences are trusted) + * - Not compatible with frame checksum, which must be disabled + * - If any block is incompressible, will fail and return an error + * - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error. + * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals. + * @litBufCapacity must be at least 8 bytes larger than @litSize. + * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error. + * @return : final compressed size, or a ZSTD error code. + */ +ZSTDLIB_STATIC_API size_t +ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t nbSequences, + const void* literals, size_t litSize, size_t litBufCapacity, + size_t decompressedSize); /*! ZSTD_writeSkippableFrame() : @@ -1480,8 +1714,8 @@ ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize, * * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. - * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so - * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. + * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, + * so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. * * Returns an error if destination buffer is not large enough, if the source size is not representable * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid). @@ -1489,26 +1723,28 @@ ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize, * @return : number of bytes written or a ZSTD error. */ ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, - const void* src, size_t srcSize, unsigned magicVariant); + const void* src, size_t srcSize, + unsigned magicVariant); /*! ZSTD_readSkippableFrame() : - * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. + * Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer. * - * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, - * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested - * in the magicVariant. + * The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. + * This can be NULL if the caller is not interested in the magicVariant. * * Returns an error if destination buffer is not large enough, or if the frame is not skippable. * * @return : number of bytes written or a ZSTD error. */ -ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, - const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, + unsigned* magicVariant, + const void* src, size_t srcSize); /*! 
ZSTD_isSkippableFrame() : * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. */ -ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); +ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); @@ -1519,48 +1755,59 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); /*! ZSTD_estimate*() : * These functions make it possible to estimate memory usage * of a future {D,C}Ctx, before its creation. + * This is useful in combination with ZSTD_initStatic(), + * which makes it possible to employ a static buffer for ZSTD_CCtx* state. * * ZSTD_estimateCCtxSize() will provide a memory budget large enough - * for any compression level up to selected one. - * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate - * does not include space for a window buffer. - * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming. + * to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2() + * associated with any compression level up to max specified one. * The estimate will assume the input may be arbitrarily large, * which is the worst case. * + * Note that the size estimation is specific for one-shot compression, + * it is not valid for streaming (see ZSTD_estimateCStreamSize*()) + * nor other potential ways of using a ZSTD_CCtx* state. + * * When srcSize can be bound by a known and rather "small" value, - * this fact can be used to provide a tighter estimation - * because the CCtx compression context will need less memory. - * This tighter estimation can be provided by more advanced functions + * this knowledge can be used to provide a tighter budget estimation + * because the ZSTD_CCtx* state will need less memory for small inputs. + * This tighter estimation can be provided by employing more advanced functions * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. * - * Note 2 : only single-threaded compression is supported. + * Note : only single-threaded compression is supported. * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. */ -ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int maxCompressionLevel); ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void); /*! ZSTD_estimateCStreamSize() : - * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one. - * It will also consider src size to be arbitrarily "large", which is worst case. + * ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression + * using any compression level up to the max specified one. + * It will also consider src size to be arbitrarily "large", which is a worst case scenario. * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. 
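A small sketch of the skippable-frame helpers documented above, embedding a metadata payload under magic variant 0 and reading it back; the payload and buffer sizes are invented:

#define ZSTD_STATIC_LINKING_ONLY   /* these helpers are static-API in this version */
#include <zstd.h>

/* Sketch: write and re-read application metadata as a skippable frame.
 * Decoders that don't recognize it will skip it transparently. */
static void skippable_roundtrip(void)
{
    const char meta[] = "app-metadata-v1";
    char frame[128];
    char readback[128];
    unsigned variant;
    size_t const fSize = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                  meta, sizeof(meta), 0);
    if (ZSTD_isError(fSize)) return;
    if (!ZSTD_isSkippableFrame(frame, fSize)) return;   /* sanity check */
    {   size_t const cSize = ZSTD_readSkippableFrame(readback, sizeof(readback),
                                                     &variant, frame, fSize);
        (void)cSize; (void)variant;   /* variant == 0, cSize == sizeof(meta) */
    }
}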
* ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
 * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
 * Only single-threaded compression is supported.
 * This function will return an error code if ZSTD_c_nbWorkers is >= 1.
 * Note : CStream size estimation is only correct for single-threaded compression.
- * ZSTD_DStream memory budget depends on window Size.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
+ * Size estimates assume that no external sequence producer is registered.
+ *
+ * ZSTD_DStream memory budget depends on frame's window Size.
 * This information can be passed manually, using ZSTD_estimateDStreamSize,
 * or deduced from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
+ * Any frame requesting a window size larger than the specified maximum will be rejected.
 * Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
 * an internal ?Dict will be created, which additional size is not estimated here.
- * In this case, get total size by adding ZSTD_estimate?DictSize */
-ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ * In this case, get total size by adding ZSTD_estimate?DictSize
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int maxCompressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t maxWindowSize);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);

/*! ZSTD_estimate?DictSize() :
@@ -1621,11 +1868,18 @@ ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict(
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+#if defined(__clang__) && __clang_major__ >= 5
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
+#endif
static
#ifdef __GNUC__
__attribute__((__unused__))
#endif
ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
+#if defined(__clang__) && __clang_major__ >= 5
+#pragma clang diagnostic pop
+#endif

ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
@@ -1706,6 +1960,27 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
 * This function never fails (wide contract) */
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);

+/*! ZSTD_CCtx_setCParams() :
+ * Set all parameters provided within @p cparams into the working @p cctx.
+ * Note : if modifying parameters during compression (MT mode only),
+ * note that changes to the .windowLog parameter will be ignored.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ * On failure, no parameters are updated.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
+
+/*! ZSTD_CCtx_setFParams() :
+ * Set all parameters provided within @p fparams into the working @p cctx.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams);
+
+/*! ZSTD_CCtx_setParams() :
+ * Set all parameters provided within @p params into the working @p cctx.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params);
+
/*! ZSTD_compress_advanced() :
 * Note : this function is now DEPRECATED.
 * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
@@ -1713,10 +1988,10 @@ ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressio
ZSTD_DEPRECATED("use ZSTD_compress2")
ZSTDLIB_STATIC_API
size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
-                  void* dst, size_t dstCapacity,
-                  const void* src, size_t srcSize,
-                  const void* dict,size_t dictSize,
-                  ZSTD_parameters params);
+              void* dst, size_t dstCapacity,
+              const void* src, size_t srcSize,
+              const void* dict,size_t dictSize,
+              ZSTD_parameters params);

/*! ZSTD_compress_usingCDict_advanced() :
 * Note : this function is now DEPRECATED.
@@ -1784,7 +2059,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
 * See the comments on that enum for an explanation of the feature. */
#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4

-/* Controlled with ZSTD_paramSwitch_e enum.
+/* Controlled with ZSTD_ParamSwitch_e enum.
 * Default is ZSTD_ps_auto.
 * Set to ZSTD_ps_disable to never compress literals.
 * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals
@@ -1796,11 +2071,6 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
 */
#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5

-/* Tries to fit compressed block size to be around targetCBlockSize.
- * No target when targetCBlockSize == 0.
- * There is no guarantee on compressed block size (default:0) */
-#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
-
/* User's best guess of source size.
 * Hint is not valid when srcSizeHint == 0.
 * There is no guarantee that hint is close to actual source size,
@@ -1930,22 +2200,46 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
/* ZSTD_c_validateSequences
 * Default is 0 == disabled. Set to 1 to enable sequence validation.
 *
- * For use with sequence compression API: ZSTD_compressSequences().
- * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * For use with sequence compression API: ZSTD_compressSequences*().
+ * Designates whether or not provided sequences are validated within ZSTD_compressSequences*()
 * during function execution.
 *
- * Without validation, providing a sequence that does not conform to the zstd spec will cause
- * undefined behavior, and may produce a corrupted block.
+ * When Sequence validation is disabled (default), Sequences are compressed as-is,
+ * so they must be correct, otherwise it would result in a corruption error.
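As a quick illustration of the bulk setters declared above (a hedged sketch: `cctx` is assumed to be a valid ZSTD_CCtx*, and level 19 is an arbitrary choice):

```c
/* Sketch: apply a complete parameter set in one call,
 * instead of a series of ZSTD_CCtx_setParameter() invocations. */
ZSTD_parameters const params = ZSTD_getParams(19, ZSTD_CONTENTSIZE_UNKNOWN, 0);
size_t const err = ZSTD_CCtx_setParams(cctx, params);   /* cParams + fParams together */
if (ZSTD_isError(err)) {
    /* per the contract above, no parameters were updated */
}
```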
* - * With validation enabled, if sequence is invalid (see doc/zstd_compression_format.md for + * Sequence validation adds some protection, by ensuring that all values respect boundary conditions. + * If a Sequence is detected invalid (see doc/zstd_compression_format.md for * specifics regarding offset/matchlength requirements) then the function will bail out and * return an error. - * */ #define ZSTD_c_validateSequences ZSTD_c_experimentalParam12 -/* ZSTD_c_useBlockSplitter - * Controlled with ZSTD_paramSwitch_e enum. +/* ZSTD_c_blockSplitterLevel + * note: this parameter only influences the first splitter stage, + * which is active before producing the sequences. + * ZSTD_c_splitAfterSequences controls the next splitter stage, + * which is active after sequence production. + * Note that both can be combined. + * Allowed values are between 0 and ZSTD_BLOCKSPLITTER_LEVEL_MAX included. + * 0 means "auto", which will select a value depending on current ZSTD_c_strategy. + * 1 means no splitting. + * Then, values from 2 to 6 are sorted in increasing cpu load order. + * + * Note that currently the first block is never split, + * to ensure expansion guarantees in presence of incompressible data. + */ +#define ZSTD_BLOCKSPLITTER_LEVEL_MAX 6 +#define ZSTD_c_blockSplitterLevel ZSTD_c_experimentalParam20 + +/* ZSTD_c_splitAfterSequences + * This is a stronger splitter algorithm, + * based on actual sequences previously produced by the selected parser. + * It's also slower, and as a consequence, mostly used for high compression levels. + * While the post-splitter does overlap with the pre-splitter, + * both can nonetheless be combined, + * notably with ZSTD_c_blockSplitterLevel at ZSTD_BLOCKSPLITTER_LEVEL_MAX, + * resulting in higher compression ratio than just one of them. + * * Default is ZSTD_ps_auto. * Set to ZSTD_ps_disable to never use block splitter. * Set to ZSTD_ps_enable to always use block splitter. @@ -1953,10 +2247,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use * block splitting based on the compression parameters. */ -#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13 +#define ZSTD_c_splitAfterSequences ZSTD_c_experimentalParam13 /* ZSTD_c_useRowMatchFinder - * Controlled with ZSTD_paramSwitch_e enum. + * Controlled with ZSTD_ParamSwitch_e enum. * Default is ZSTD_ps_auto. * Set to ZSTD_ps_disable to never use row-based matchfinder. * Set to ZSTD_ps_enable to force usage of row-based matchfinder. @@ -1988,7 +2282,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo #define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15 /* ZSTD_c_prefetchCDictTables - * Controlled with ZSTD_paramSwitch_e enum. Default is ZSTD_ps_auto. + * Controlled with ZSTD_ParamSwitch_e enum. Default is ZSTD_ps_auto. * * In some situations, zstd uses CDict tables in-place rather than copying them * into the working context. (See docs on ZSTD_dictAttachPref_e above for details). @@ -2010,6 +2304,57 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo */ #define ZSTD_c_prefetchCDictTables ZSTD_c_experimentalParam16 +/* ZSTD_c_enableSeqProducerFallback + * Allowed values are 0 (disable) and 1 (enable). The default setting is 0. + * + * Controls whether zstd will fall back to an internal sequence producer if an + * external sequence producer is registered and returns an error code. 
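Returning to the two block-splitter stages documented just above: the combination they recommend could be requested as follows (a sketch; `cctx` is assumed valid, and the ratio gain remains data-dependent):

```c
/* Sketch: enable both splitter stages for maximum ratio, at extra CPU cost. */
ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockSplitterLevel, ZSTD_BLOCKSPLITTER_LEVEL_MAX);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_splitAfterSequences, ZSTD_ps_enable);
```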
This fallback + * is block-by-block: the internal sequence producer will only be called for blocks + * where the external sequence producer returns an error code. Fallback parsing will + * follow any other cParam settings, such as compression level, the same as in a + * normal (fully-internal) compression operation. + * + * The user is strongly encouraged to read the full Block-Level Sequence Producer API + * documentation (below) before setting this parameter. */ +#define ZSTD_c_enableSeqProducerFallback ZSTD_c_experimentalParam17 + +/* ZSTD_c_maxBlockSize + * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB). + * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default. + * + * This parameter can be used to set an upper bound on the blocksize + * that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper + * bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make + * compressBound() inaccurate). Only currently meant to be used for testing. + */ +#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18 + +/* ZSTD_c_repcodeResolution + * This parameter only has an effect if ZSTD_c_blockDelimiters is + * set to ZSTD_sf_explicitBlockDelimiters (may change in the future). + * + * This parameter affects how zstd parses external sequences, + * provided via the ZSTD_compressSequences*() API + * or from an external block-level sequence producer. + * + * If set to ZSTD_ps_enable, the library will check for repeated offsets within + * external sequences, even if those repcodes are not explicitly indicated in + * the "rep" field. Note that this is the only way to exploit repcode matches + * while using compressSequences*() or an external sequence producer, since zstd + * currently ignores the "rep" field of external sequences. + * + * If set to ZSTD_ps_disable, the library will not exploit repeated offsets in + * external sequences, regardless of whether the "rep" field has been set. This + * reduces sequence compression overhead by about 25% while sacrificing some + * compression ratio. + * + * The default value is ZSTD_ps_auto, for which the library will enable/disable + * based on compression level (currently: level<10 disables, level>=10 enables). + */ +#define ZSTD_c_repcodeResolution ZSTD_c_experimentalParam19 +#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 /* older name */ + + /*! ZSTD_CCtx_getParameter() : * Get the requested compression parameter value, selected by enum ZSTD_cParameter, * and store it into int* value. @@ -2219,6 +2564,33 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete */ #define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4 +/* ZSTD_d_disableHuffmanAssembly + * Set to 1 to disable the Huffman assembly implementation. + * The default value is 0, which allows zstd to use the Huffman assembly + * implementation if available. + * + * This parameter can be used to disable Huffman assembly at runtime. + * If you want to disable it at compile time you can define the macro + * ZSTD_DISABLE_ASM. + */ +#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5 + +/* ZSTD_d_maxBlockSize + * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB). + * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default. + * + * Forces the decompressor to reject blocks whose content size is + * larger than the configured maxBlockSize. When maxBlockSize is + * larger than the windowSize, the windowSize is used instead. 
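A small usage sketch of the block-size caps discussed here (the 16 KB figure is an arbitrary example, and `cctx`/`dctx` are assumed to already exist); the decoder-side parameter, ZSTD_d_maxBlockSize, is the one whose description continues just below:

```c
/* Sketch: agree on a 16 KB block cap on both the compression
 * and decompression side, bounding decoder memory accordingly. */
#define EXAMPLE_BLOCK_CAP (16 * 1024)   /* illustrative value, >= 1KB */
ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, EXAMPLE_BLOCK_CAP);
ZSTD_DCtx_setParameter(dctx, ZSTD_d_maxBlockSize, EXAMPLE_BLOCK_CAP);
```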
+ * This saves memory on the decoder when you know all blocks are small. + * + * This option is typically used in conjunction with ZSTD_c_maxBlockSize. + * + * WARNING: This causes the decoder to reject otherwise valid frames + * that have block sizes larger than the configured maxBlockSize. + */ +#define ZSTD_d_maxBlockSize ZSTD_d_experimentalParam6 + /*! ZSTD_DCtx_setFormat() : * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). @@ -2288,12 +2660,9 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, int compressionLevel); /*! ZSTD_initCStream_advanced() : - * This function is DEPRECATED, and is approximately equivalent to: + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * // Pseudocode: Set each zstd parameter and leave the rest as-is. - * for ((param, value) : params) { - * ZSTD_CCtx_setParameter(zcs, param, value); - * } + * ZSTD_CCtx_setParams(zcs, params); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * @@ -2322,12 +2691,9 @@ ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /*! ZSTD_initCStream_usingCDict_advanced() : - * This function is DEPRECATED, and is approximately equivalent to: + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * // Pseudocode: Set each zstd frame parameter and leave the rest as-is. - * for ((fParam, value) : fParams) { - * ZSTD_CCtx_setParameter(zcs, fParam, value); - * } + * ZSTD_CCtx_setFParams(zcs, fParams); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_refCDict(zcs, cdict); * @@ -2352,7 +2718,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, * explicitly specified. * * start a new frame, using same parameters from previous frame. - * This is typically useful to skip dictionary loading stage, since it will re-use it in-place. + * This is typically useful to skip dictionary loading stage, since it will reuse it in-place. * Note that zcs must be init at least once before using ZSTD_resetCStream(). * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN. * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end. @@ -2408,8 +2774,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); * ZSTD_DCtx_loadDictionary(zds, dict, dictSize); * * note: no dictionary will be used if dict == NULL or dictSize < 8 - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /*! @@ -2419,8 +2785,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo * ZSTD_DCtx_refDDict(zds, ddict); * * note : ddict is referenced, it must outlive decompression session - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /*! 
@@ -2428,18 +2794,202 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const Z * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * - * re-use decompression parameters from previous init; saves dictionary loading - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x + * reuse decompression parameters from previous init; saves dictionary loading */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); +/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API ********************* + * + * *** OVERVIEW *** + * The Block-Level Sequence Producer API allows users to provide their own custom + * sequence producer which libzstd invokes to process each block. The produced list + * of sequences (literals and matches) is then post-processed by libzstd to produce + * valid compressed blocks. + * + * This block-level offload API is a more granular complement of the existing + * frame-level offload API compressSequences() (introduced in v1.5.1). It offers + * an easier migration story for applications already integrated with libzstd: the + * user application continues to invoke the same compression functions + * ZSTD_compress2() or ZSTD_compressStream2() as usual, and transparently benefits + * from the specific advantages of the external sequence producer. For example, + * the sequence producer could be tuned to take advantage of known characteristics + * of the input, to offer better speed / ratio, or could leverage hardware + * acceleration not available within libzstd itself. + * + * See contrib/externalSequenceProducer for an example program employing the + * Block-Level Sequence Producer API. + * + * *** USAGE *** + * The user is responsible for implementing a function of type + * ZSTD_sequenceProducer_F. For each block, zstd will pass the following + * arguments to the user-provided function: + * + * - sequenceProducerState: a pointer to a user-managed state for the sequence + * producer. + * + * - outSeqs, outSeqsCapacity: an output buffer for the sequence producer. + * outSeqsCapacity is guaranteed >= ZSTD_sequenceBound(srcSize). The memory + * backing outSeqs is managed by the CCtx. + * + * - src, srcSize: an input buffer for the sequence producer to parse. + * srcSize is guaranteed to be <= ZSTD_BLOCKSIZE_MAX. + * + * - dict, dictSize: a history buffer, which may be empty, which the sequence + * producer may reference as it parses the src buffer. Currently, zstd will + * always pass dictSize == 0 into external sequence producers, but this will + * change in the future. + * + * - compressionLevel: a signed integer representing the zstd compression level + * set by the user for the current operation. The sequence producer may choose + * to use this information to change its compression strategy and speed/ratio + * tradeoff. Note: the compression level does not reflect zstd parameters set + * through the advanced API. + * + * - windowSize: a size_t representing the maximum allowed offset for external + * sequences. Note that sequence offsets are sometimes allowed to exceed the + * windowSize if a dictionary is present, see doc/zstd_compression_format.md + * for details. + * + * The user-provided function shall return a size_t representing the number of + * sequences written to outSeqs. This return value will be treated as an error + * code if it is greater than outSeqsCapacity. 
The return value must be non-zero + * if srcSize is non-zero. The ZSTD_SEQUENCE_PRODUCER_ERROR macro is provided + * for convenience, but any value greater than outSeqsCapacity will be treated as + * an error code. + * + * If the user-provided function does not return an error code, the sequences + * written to outSeqs must be a valid parse of the src buffer. Data corruption may + * occur if the parse is not valid. A parse is defined to be valid if the + * following conditions hold: + * - The sum of matchLengths and literalLengths must equal srcSize. + * - All sequences in the parse, except for the final sequence, must have + * matchLength >= ZSTD_MINMATCH_MIN. The final sequence must have + * matchLength >= ZSTD_MINMATCH_MIN or matchLength == 0. + * - All offsets must respect the windowSize parameter as specified in + * doc/zstd_compression_format.md. + * - If the final sequence has matchLength == 0, it must also have offset == 0. + * + * zstd will only validate these conditions (and fail compression if they do not + * hold) if the ZSTD_c_validateSequences cParam is enabled. Note that sequence + * validation has a performance cost. + * + * If the user-provided function returns an error, zstd will either fall back + * to an internal sequence producer or fail the compression operation. The user can + * choose between the two behaviors by setting the ZSTD_c_enableSeqProducerFallback + * cParam. Fallback compression will follow any other cParam settings, such as + * compression level, the same as in a normal compression operation. + * + * The user shall instruct zstd to use a particular ZSTD_sequenceProducer_F + * function by calling + * ZSTD_registerSequenceProducer(cctx, + * sequenceProducerState, + * sequenceProducer) + * This setting will persist until the next parameter reset of the CCtx. + * + * The sequenceProducerState must be initialized by the user before calling + * ZSTD_registerSequenceProducer(). The user is responsible for destroying the + * sequenceProducerState. + * + * *** LIMITATIONS *** + * This API is compatible with all zstd compression APIs which respect advanced parameters. + * However, there are three limitations: + * + * First, the ZSTD_c_enableLongDistanceMatching cParam is not currently supported. + * COMPRESSION WILL FAIL if it is enabled and the user tries to compress with a block-level + * external sequence producer. + * - Note that ZSTD_c_enableLongDistanceMatching is auto-enabled by default in some + * cases (see its documentation for details). Users must explicitly set + * ZSTD_c_enableLongDistanceMatching to ZSTD_ps_disable in such cases if an external + * sequence producer is registered. + * - As of this writing, ZSTD_c_enableLongDistanceMatching is disabled by default + * whenever ZSTD_c_windowLog < 128MB, but that's subject to change. Users should + * check the docs on ZSTD_c_enableLongDistanceMatching whenever the Block-Level Sequence + * Producer API is used in conjunction with advanced settings (like ZSTD_c_windowLog). + * + * Second, history buffers are not currently supported. Concretely, zstd will always pass + * dictSize == 0 to the external sequence producer (for now). This has two implications: + * - Dictionaries are not currently supported. Compression will *not* fail if the user + * references a dictionary, but the dictionary won't have any effect. + * - Stream history is not currently supported. 
All advanced compression APIs, including + * streaming APIs, work with external sequence producers, but each block is treated as + * an independent chunk without history from previous blocks. + * + * Third, multi-threading within a single compression is not currently supported. In other words, + * COMPRESSION WILL FAIL if ZSTD_c_nbWorkers > 0 and an external sequence producer is registered. + * Multi-threading across compressions is fine: simply create one CCtx per thread. + * + * Long-term, we plan to overcome all three limitations. There is no technical blocker to + * overcoming them. It is purely a question of engineering effort. + */ + +#define ZSTD_SEQUENCE_PRODUCER_ERROR ((size_t)(-1)) + +typedef size_t (*ZSTD_sequenceProducer_F) ( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +); + +/*! ZSTD_registerSequenceProducer() : + * Instruct zstd to use a block-level external sequence producer function. + * + * The sequenceProducerState must be initialized by the caller, and the caller is + * responsible for managing its lifetime. This parameter is sticky across + * compressions. It will remain set until the user explicitly resets compression + * parameters. + * + * Sequence producer registration is considered to be an "advanced parameter", + * part of the "advanced API". This means it will only have an effect on compression + * APIs which respect advanced parameters, such as compress2() and compressStream2(). + * Older compression APIs such as compressCCtx(), which predate the introduction of + * "advanced parameters", will ignore any external sequence producer setting. + * + * The sequence producer can be "cleared" by registering a NULL function pointer. This + * removes all limitations described above in the "LIMITATIONS" section of the API docs. + * + * The user is strongly encouraged to read the full API documentation (above) before + * calling this function. */ +ZSTDLIB_STATIC_API void +ZSTD_registerSequenceProducer( + ZSTD_CCtx* cctx, + void* sequenceProducerState, + ZSTD_sequenceProducer_F sequenceProducer +); + +/*! ZSTD_CCtxParams_registerSequenceProducer() : + * Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params. + * This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(), + * which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx(). + * + * If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx() + * is required, then this function is for you. Otherwise, you probably don't need it. + * + * See tests/zstreamtest.c for example usage. */ +ZSTDLIB_STATIC_API void +ZSTD_CCtxParams_registerSequenceProducer( + ZSTD_CCtx_params* params, + void* sequenceProducerState, + ZSTD_sequenceProducer_F sequenceProducer +); + + /********************************************************************* -* Buffer-less and synchronous inner streaming functions +* Buffer-less and synchronous inner streaming functions (DEPRECATED) +* +* This API is deprecated, and will be removed in a future version. +* It allows streaming (de)compression with user allocated buffers. +* However, it is hard to use, and not as well tested as the rest of +* our API. * -* This is an advanced API, giving full control over buffer management, for users which need direct control over memory. -* But it's also a complex one, with several restrictions, documented below. 
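To ground the contract spelled out above, here is a deliberately trivial external sequence producer (a sketch, not library code): it reports the whole block as literals via a single final sequence, which satisfies the validity rules. A real producer would emit actual matches, e.g. from hardware-accelerated search:

```c
#include <stddef.h>

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Sketch: a "literals only" producer. It emits one final sequence with
 * matchLength == 0 and offset == 0, so litLength + matchLength sums to
 * srcSize exactly, as the parse-validity rules require. */
static size_t literalsOnlyProducer(void* sequenceProducerState,
                                   ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                                   const void* src, size_t srcSize,
                                   const void* dict, size_t dictSize,
                                   int compressionLevel, size_t windowSize)
{
    (void)sequenceProducerState; (void)src; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    if (outSeqsCapacity < 1) return ZSTD_SEQUENCE_PRODUCER_ERROR;
    outSeqs[0].offset      = 0;                  /* final sequence => offset 0 */
    outSeqs[0].litLength   = (unsigned)srcSize;  /* every byte is a literal */
    outSeqs[0].matchLength = 0;                  /* no match emitted */
    outSeqs[0].rep         = 0;
    return 1;  /* non-zero, as required when srcSize is non-zero */
}

/* Registration; the state pointer is unused here, hence NULL:
 *   ZSTD_registerSequenceProducer(cctx, NULL, literalsOnlyProducer);   */
```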
-* Prefer normal streaming API for an easier experience.
+* Please use the normal streaming API instead: ZSTD_compressStream2,
+* and ZSTD_decompressStream.
+* If there is functionality you need that it doesn't provide,
+* please open an issue on our GitHub.
********************************************************************* */

/**
@@ -2447,7 +2997,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
 A ZSTD_CCtx object is required to track streaming operations.
 Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
- ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+ ZSTD_CCtx object can be reused multiple times within successive compression operations.

 Start by initializing a context.
 Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
@@ -2468,19 +3018,24 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
 It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
 Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.

- `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+ `ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again.
*/

/*===== Buffer-less streaming compression functions =====*/
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
-ZSTDLIB_STATIC_API
ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.")
+ZSTDLIB_STATIC_API
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);

/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
@@ -2495,17 +3050,17 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
 A ZSTD_DCtx object is required to track streaming operations.
 Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
- A ZSTD_DCtx object can be re-used multiple times.
+ A ZSTD_DCtx object can be reused multiple times.

 First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
 Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
 Data fragment must be large enough to ensure successful decoding.
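For readers migrating off this deprecated API, a minimal single-frame decompression loop with the recommended ZSTD_decompressStream() might look like the following sketch (the helper name and simplified error handling are assumptions, not prescribed usage):

```c
#include <zstd.h>

/* Sketch: decompress one complete frame held in memory.
 * Returns the number of bytes written to dst, or a zstd error code. */
static size_t decompressFrame(ZSTD_DCtx* dctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
{
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    while (in.pos < in.size) {
        size_t const ret = ZSTD_decompressStream(dctx, &out, &in);
        if (ZSTD_isError(ret)) return ret;   /* propagate error code */
        if (ret == 0) break;                 /* frame fully decoded and flushed */
    }
    return out.pos;   /* bytes produced; caller should check frame completion */
}
```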
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. - @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. - >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. + result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. + >0 : `srcSize` is too small, please provide at least result bytes on next attempt. errorCode, which can be tested using ZSTD_isError(). - It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, + It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame, such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`). Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. As a consequence, check that values remain within valid application range. @@ -2521,7 +3076,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ The most memory efficient way is to use a round buffer of sufficient size. Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), - which can @return an error code if required value is too large for current system (in 32-bits mode). + which can return an error code if required value is too large for current system (in 32-bits mode). In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one, up to the moment there is not enough room left in the buffer to guarantee decoding another full block, which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`. @@ -2541,7 +3096,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). + result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. It can also be an error code, which can be tested with ZSTD_isError(). @@ -2564,27 +3119,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ */ /*===== Buffer-less streaming decompression functions =====*/ -typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; -typedef struct { - unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ - unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ - unsigned blockSizeMax; - ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ - unsigned headerSize; - unsigned dictID; - unsigned checksumFlag; -} ZSTD_frameHeader; -/*! ZSTD_getFrameHeader() : - * decode Frame Header, or requires larger `srcSize`. - * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ -ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ -/*! 
ZSTD_getFrameHeader_advanced() : - * same as ZSTD_getFrameHeader(), - * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ -ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); @@ -2602,12 +3137,36 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); +/*! ZSTD_isDeterministicBuild() : + * Returns 1 if the library is built using standard compilation flags, + * and participates in determinism guarantees with other builds of the + * same version. + * If this function returns 0, it means the library was compiled with + * non-standard compilation flags that change the output of the + * compressor. + * This is mainly used for Zstd's determinism test suite, which is only + * run when this function returns 1. + */ +ZSTDLIB_API int ZSTD_isDeterministicBuild(void); + -/* ============================ */ -/** Block level API */ -/* ============================ */ +/* ========================================= */ +/** Block level API (DEPRECATED) */ +/* ========================================= */ /*! + + This API is deprecated in favor of the regular compression API. + You can get the frame header down to 2 bytes by setting: + - ZSTD_c_format = ZSTD_f_zstd1_magicless + - ZSTD_c_contentSizeFlag = 0 + - ZSTD_c_checksumFlag = 0 + - ZSTD_c_dictIDFlag = 0 + + This API is not as well tested as our normal API, so we recommend not using it. + We will be removing it in a future version. If the normal API doesn't provide + the functionality you need, please open a GitHub issue. + Block functions produce and decode raw zstd blocks, without frame metadata. Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes. @@ -2634,14 +3193,17 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); */ /*===== Raw zstd block functions =====*/ +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. 
*/

-
-#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
-
#if defined (__cplusplus)
}
#endif
+
+#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
diff --git a/lib/zstd_errors.h b/lib/zstd_errors.h
index 2ec0b0ab168..8ebc95cbb2a 100644
--- a/lib/zstd_errors.h
+++ b/lib/zstd_errors.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
@@ -15,24 +15,32 @@ extern "C" {
#endif

-/*===== dependency =====*/
-#include <stddef.h>   /* size_t */
-
-
/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
-#ifndef ZSTDERRORLIB_VISIBILITY
-# if defined(__GNUC__) && (__GNUC__ >= 4)
-# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#ifndef ZSTDERRORLIB_VISIBLE
+ /* Backwards compatibility with old macro name */
+# ifdef ZSTDERRORLIB_VISIBILITY
+# define ZSTDERRORLIB_VISIBLE ZSTDERRORLIB_VISIBILITY
+# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
+# define ZSTDERRORLIB_VISIBLE __attribute__ ((visibility ("default")))
# else
-# define ZSTDERRORLIB_VISIBILITY
+# define ZSTDERRORLIB_VISIBLE
# endif
#endif
+
+#ifndef ZSTDERRORLIB_HIDDEN
+# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
+# define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden")))
+# else
+# define ZSTDERRORLIB_HIDDEN
+# endif
+#endif
+
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
+# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBLE
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
-# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE
#endif

/*-*********************************************
@@ -58,14 +66,17 @@ typedef enum {
 ZSTD_error_frameParameter_windowTooLarge = 16,
 ZSTD_error_corruption_detected = 20,
 ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_literals_headerWrong = 24,
 ZSTD_error_dictionary_corrupted = 30,
 ZSTD_error_dictionary_wrong = 32,
 ZSTD_error_dictionaryCreation_failed = 34,
 ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_combination_unsupported = 41,
 ZSTD_error_parameter_outOfBound = 42,
 ZSTD_error_tableLog_tooLarge = 44,
 ZSTD_error_maxSymbolValue_tooLarge = 46,
 ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_cannotProduce_uncompressedBlock = 49,
 ZSTD_error_stabilityCondition_notRespected = 50,
 ZSTD_error_stage_wrong = 60,
 ZSTD_error_init_missing = 62,
@@ -74,18 +85,18 @@ typedef enum {
 ZSTD_error_dstSize_tooSmall = 70,
 ZSTD_error_srcSize_wrong = 72,
 ZSTD_error_dstBuffer_null = 74,
+ ZSTD_error_noForwardProgress_destFull = 80,
+ ZSTD_error_noForwardProgress_inputEmpty = 82,
 /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
 ZSTD_error_frameIndex_tooLarge = 100,
 ZSTD_error_seekableIO = 102,
 ZSTD_error_dstBuffer_wrong = 104,
 ZSTD_error_srcBuffer_wrong = 105,
+ ZSTD_error_sequenceProducer_failed = 106,
+ ZSTD_error_externalSequences_invalid = 107,
 ZSTD_error_maxCode = 120 /* never EVER use this
value directly, it can change in future versions! Use ZSTD_isError() instead */ } ZSTD_ErrorCode; -/*! ZSTD_getErrorCode() : - convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, - which can be used to compare with enum list published above */ -ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ diff --git a/programs/.gitignore b/programs/.gitignore index 2d4edbe45b1..42a7e30dc68 100644 --- a/programs/.gitignore +++ b/programs/.gitignore @@ -9,6 +9,8 @@ zstd-small zstd-nolegacy zstd-dictBuilder zstd-dll +zstd_arm64 +zstd_x64 # Object files *.o diff --git a/programs/Makefile b/programs/Makefile index 7a3d3e0d81b..94f2179d028 100644 --- a/programs/Makefile +++ b/programs/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -15,12 +15,11 @@ # zstd-decompress : decompressor-only version of zstd # ########################################################################## -.PHONY: default -default: zstd-release +# default target (when running `make` with no argument) +zstd-release: -LIBZSTD := ../lib - -include $(LIBZSTD)/libzstd.mk +LIBZSTD_MK_DIR = ../lib +include $(LIBZSTD_MK_DIR)/libzstd.mk ifeq ($(shell $(CC) -v 2>&1 | $(GREP) -c "gcc version "), 1) ALIGN_LOOP = -falign-loops=32 @@ -171,7 +170,7 @@ endif zstd : $(BUILD_DIR)/zstd if [ $(BIN_ISDIFFERENT) -eq 1 ]; then \ - cp -f $<$(EXT) $@$(EXT); \ + $(CP) $<$(EXT) $@$(EXT); \ echo zstd build completed; \ else \ echo zstd already built; \ @@ -223,26 +222,33 @@ zstd-noxz : zstd ## zstd-dll: zstd executable linked to dynamic library libzstd (must have same version) .PHONY: zstd-dll -zstd-dll : LDFLAGS+= -L$(LIBZSTD) +zstd-dll : LDFLAGS+= -L$(LIB_BINDIR) zstd-dll : LDLIBS += -lzstd -zstd-dll : ZSTDLIB_LOCAL_SRC = xxhash.c +zstd-dll : ZSTDLIB_LOCAL_SRC = xxhash.c pool.c threading.c zstd-dll : zstd ## zstd-pgo: zstd executable optimized with PGO. .PHONY: zstd-pgo +zstd-pgo : LLVM_PROFDATA?=llvm-profdata +zstd-pgo : PROF_GENERATE_FLAGS=-fprofile-generate $(if $(findstring gcc,$(CC)),-fprofile-dir=.) +zstd-pgo : PROF_USE_FLAGS=-fprofile-use $(if $(findstring gcc,$(CC)),-fprofile-dir=. -Wno-error=missing-profile -Wno-error=coverage-mismatch) zstd-pgo : - $(MAKE) clean - $(MAKE) zstd MOREFLAGS=-fprofile-generate + $(MAKE) clean HASH_DIR=$(HASH_DIR) + $(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_GENERATE_FLAGS)" ./zstd -b19i1 $(PROFILE_WITH) ./zstd -b16i1 $(PROFILE_WITH) ./zstd -b9i2 $(PROFILE_WITH) ./zstd -b $(PROFILE_WITH) ./zstd -b7i2 $(PROFILE_WITH) ./zstd -b5 $(PROFILE_WITH) - $(RM) zstd *.o - case $(CC) in *clang*) if ! [ -e default.profdata ]; then llvm-profdata merge -output=default.profdata default*.profraw; fi ;; esac - $(MAKE) zstd MOREFLAGS=-fprofile-use +ifndef BUILD_DIR + $(RM) zstd obj/$(HASH_DIR)/zstd obj/$(HASH_DIR)/*.o +else + $(RM) zstd $(BUILD_DIR)/zstd $(BUILD_DIR)/*.o +endif + case $(CC) in *clang*) if ! [ -e default.profdata ]; then $(LLVM_PROFDATA) merge -output=default.profdata default*.profraw; fi ;; esac + $(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_USE_FLAGS)" ## zstd-small: minimal target, supporting only zstd compression and decompression. no bench. no legacy. no other format. 
CLEAN += zstd-small zstd-frugal @@ -265,14 +271,14 @@ zstd-dictBuilder: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) $(ZDICT_SRC) zst CLEAN += zstdmt zstdmt: zstd - ln -sf zstd zstdmt + $(LN) -sf zstd zstdmt .PHONY: generate_res generate_res: $(RES64_FILE) $(RES32_FILE) ifneq (,$(filter Windows%,$(OS))) RC ?= windres -# http://stackoverflow.com/questions/708238/how-do-i-add-an-icon-to-a-mingw-gcc-compiled-executable +# https://stackoverflow.com/questions/708238/how-do-i-add-an-icon-to-a-mingw-gcc-compiled-executable $(RES64_FILE): windres/zstd.rc $(RC) -o $@ -I ../lib -I windres -i $< -O coff -F pe-x86-64 $(RES32_FILE): windres/zstd.rc @@ -339,7 +345,7 @@ include $(wildcard $(DEPFILES)) #----------------------------------------------------------------------------- # make install is validated only for Linux, macOS, BSD, Hurd and Solaris targets #----------------------------------------------------------------------------- -ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX)) +ifneq (,$(filter $(INSTALL_OS_LIST),$(UNAME))) HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) EGREP_OPTIONS ?= @@ -382,14 +388,14 @@ datarootdir ?= $(PREFIX)/share mandir ?= $(datarootdir)/man man1dir ?= $(mandir)/man1 -ifneq (,$(filter $(UNAME),OpenBSD FreeBSD NetBSD DragonFly SunOS)) +ifneq (,$(filter OpenBSD NetBSD DragonFly SunOS,$(UNAME))) MANDIR ?= $(PREFIX)/man MAN1DIR ?= $(MANDIR)/man1 else MAN1DIR ?= $(man1dir) endif -ifneq (,$(filter $(UNAME),SunOS)) +ifneq (,$(filter SunOS,$(UNAME))) INSTALL ?= ginstall else INSTALL ?= install @@ -408,15 +414,15 @@ install: [ -e $(DESTDIR)$(MAN1DIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(MAN1DIR)/ @echo Installing binaries $(INSTALL_PROGRAM) zstd$(EXT) $(DESTDIR)$(BINDIR)/zstd$(EXT) - ln -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/zstdcat$(EXT) - ln -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/unzstd$(EXT) - ln -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/zstdmt$(EXT) + $(LN) -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/zstdcat$(EXT) + $(LN) -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/unzstd$(EXT) + $(LN) -sf zstd$(EXT) $(DESTDIR)$(BINDIR)/zstdmt$(EXT) $(INSTALL_SCRIPT) zstdless $(DESTDIR)$(BINDIR)/zstdless $(INSTALL_SCRIPT) zstdgrep $(DESTDIR)$(BINDIR)/zstdgrep @echo Installing man pages $(INSTALL_MAN) zstd.1 $(DESTDIR)$(MAN1DIR)/zstd.1 - ln -sf zstd.1 $(DESTDIR)$(MAN1DIR)/zstdcat.1 - ln -sf zstd.1 $(DESTDIR)$(MAN1DIR)/unzstd.1 + $(LN) -sf zstd.1 $(DESTDIR)$(MAN1DIR)/zstdcat.1 + $(LN) -sf zstd.1 $(DESTDIR)$(MAN1DIR)/unzstd.1 $(INSTALL_MAN) zstdgrep.1 $(DESTDIR)$(MAN1DIR)/zstdgrep.1 $(INSTALL_MAN) zstdless.1 $(DESTDIR)$(MAN1DIR)/zstdless.1 @echo zstd installation completed diff --git a/programs/README.md b/programs/README.md index c192b802693..6522b647dd3 100644 --- a/programs/README.md +++ b/programs/README.md @@ -79,6 +79,13 @@ There are however other Makefile targets that create different variations of CLI This can be useful to produce smaller binaries. A corresponding `Makefile` target using this ability is `zstd-compress`. +- __ZSTD_DISPLAY_LEVEL_DEFAULT__ : Controls the default verbosity level of `zstd` output. + The default value is `2`. Lower values (e.g., `1`) reduce output verbosity, + while higher values (e.g., `3`) increase it. + This allows setting preferred verbosity at compile time, + rather than passing `-q` or `-v` flags at runtime. + Example : `CPPFLAGS="-DZSTD_DISPLAY_LEVEL_DEFAULT=1" make` + - __BACKTRACE__ : `zstd` can display a stack backtrace when execution generates a runtime exception. 
By default, this feature may be degraded/disabled on some platforms unless additional compiler directives are
@@ -129,89 +136,131 @@ CLI includes in-memory compression benchmark module for zstd.
The benchmark is conducted using given filenames. The files are read into memory and joined together.
It makes the benchmark more precise as it eliminates I/O overhead.
Multiple filenames can be supplied, as multiple parameters, with wildcards,
-or names of directories can be used as parameters with `-r` option.
+or directory names can be used with the `-r` option.
+If no file is provided, the benchmark will use a procedurally generated "lorem ipsum" content.

The benchmark measures ratio, compressed size, compression and decompression speed.
One can select compression levels starting from `-b` and ending with `-e`.
The `-i` parameter selects minimal time used for each of the tested levels.

+The benchmark can also be used to test specific parameters,
+such as the number of threads (`-T#`), advanced parameters (`--zstd=#`), or dictionary compression (`-D DICTIONARY`),
+and many others available on the command line for regular compression and decompression.
+
### Usage of Command Line Interface

The full list of options can be obtained with `-h` or `-H` parameter:
```
-Usage :
-      zstd [args] [FILE(s)] [-o file]
-
-FILE    : a filename
-          with no FILE, or when FILE is - , read standard input
-Arguments :
- -#     : # compression level (1-19, default: 3)
- -d     : decompression
- -D DICT: use DICT as Dictionary for compression or decompression
- -o file: result stored into `file` (only 1 output file)
- -f     : overwrite output without prompting, also (de)compress links
---rm    : remove source file(s) after successful de/compression
- -k     : preserve source file(s) (default)
- -h/-H  : display help/long help and exit
-
-Advanced arguments :
- -V     : display Version number and exit
- -c     : write to standard output (even if it is the console)
- -v     : verbose mode; specify multiple times to increase verbosity
- -q     : suppress warnings; specify twice to suppress errors too
---no-progress : do not display the progress counter
- -r     : operate recursively on directories
---filelist FILE : read list of files to operate upon from FILE
---output-dir-flat DIR : processed files are stored into DIR
---output-dir-mirror DIR : processed files are stored into DIR respecting original directory structure
---[no-]asyncio : use asynchronous IO (default: enabled)
---[no-]check : during compression, add XXH64 integrity checksum to frame (default: enabled). If specified with -d, decompressor will ignore/validate checksums in compressed frame (default: validate).
--- : All arguments after "--" are treated as files - -Advanced compression arguments : ---ultra : enable levels beyond 19, up to 22 (requires more memory) ---long[=#]: enable long distance matching with given window log (default: 27) ---fast[=#]: switch to very fast compression levels (default: 1) ---adapt : dynamically adapt compression level to I/O conditions ---patch-from=FILE : specify the file to be used as a reference point for zstd's diff engine - -T# : spawns # compression threads (default: 1, 0==# cores) - -B# : select size of each job (default: 0==automatic) ---single-thread : use a single thread for both I/O and compression (result slightly different than -T1) ---rsyncable : compress using a rsync-friendly method (-B sets block size) ---exclude-compressed: only compress files that are not already compressed ---stream-size=# : specify size of streaming input from `stdin` ---size-hint=# optimize compression parameters for streaming input of approximately this size ---target-compressed-block-size=# : generate compressed block of approximately targeted size ---no-dictID : don't write dictID into header (dictionary compression only) ---[no-]compress-literals : force (un)compressed literals ---format=zstd : compress files to the .zst format (default) ---format=gzip : compress files to the .gz format ---format=xz : compress files to the .xz format ---format=lzma : compress files to the .lzma format ---format=lz4 : compress files to the .lz4 format - -Advanced decompression arguments : - -l : print information about zstd compressed files ---test : test compressed file integrity - -M# : Set a memory usage limit for decompression ---[no-]sparse : sparse mode (default: disabled) - -Dictionary builder : ---train ## : create a dictionary from a training set of files ---train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]] : use the cover algorithm with optional args ---train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]] : use the fast cover algorithm with optional args ---train-legacy[=s=#] : use the legacy algorithm with selectivity (default: 9) - -o DICT : DICT is dictionary name (default: dictionary) ---maxdict=# : limit dictionary to specified size (default: 112640) ---dictID=# : force dictionary ID to specified value (default: random) - -Benchmark arguments : - -b# : benchmark file(s), using # compression level (default: 3) - -e# : test all compression levels successively from -b# to -e# (default: 1) - -i# : minimum evaluation time in seconds (default: 3s) - -B# : cut file into independent chunks of size # (default: no chunking) - -S : output one benchmark result per input file (default: consolidated result) ---priority=rt : set process priority to real-time +*** Zstandard CLI (64-bit) v1.5.6, by Yann Collet *** + +Compress or decompress the INPUT file(s); reads from STDIN if INPUT is `-` or not provided. + +Usage: zstd [OPTIONS...] [INPUT... | -] [-o OUTPUT] + +Options: + -o OUTPUT Write output to a single file, OUTPUT. + -k, --keep Preserve INPUT file(s). [Default] + --rm Remove INPUT file(s) after successful (de)compression to file. + + -# Desired compression level, where `#` is a number between 1 and 19; + lower numbers provide faster compression, higher numbers yield + better compression ratios. [Default: 3] + + -d, --decompress Perform decompression. + -D DICT Use DICT as the dictionary for compression or decompression. + + -f, --force Disable input and output checks. 
Allows overwriting existing files,
+                                receiving input from the console, printing output to STDOUT, and
+                                operating on links, block devices, etc. Unrecognized formats will be
+                                passed through as-is.
+
+  -h                            Display short usage and exit.
+  -H, --help                    Display full help and exit.
+  -V, --version                 Display the program version and exit.
+
+Advanced options:
+  -c, --stdout                  Write to STDOUT (even if it is a console) and keep the INPUT file(s).
+
+  -v, --verbose                 Enable verbose output; pass multiple times to increase verbosity.
+  -q, --quiet                   Suppress warnings; pass twice to suppress errors.
+  --trace LOG                   Log tracing information to LOG.
+
+  --[no-]progress               Forcibly show/hide the progress counter. NOTE: Any (de)compressed
+                                output to terminal will mix with progress counter text.
+
+  -r                            Operate recursively on directories.
+  --filelist LIST               Read a list of files to operate on from LIST.
+  --output-dir-flat DIR         Store processed files in DIR.
+  --output-dir-mirror DIR       Store processed files in DIR, respecting original directory structure.
+  --[no-]asyncio                Use asynchronous IO. [Default: Enabled]
+
+  --[no-]check                  Add XXH64 integrity checksums during compression. [Default: Add, Validate]
+                                If `-d` is present, ignore/validate checksums during decompression.
+
+  --                            Treat remaining arguments after `--` as files.
+
+Advanced compression options:
+  --ultra                       Enable levels beyond 19, up to 22; requires more memory.
+  --fast[=#]                    Switch to very fast compression levels. [Default: 1]
+  --adapt                       Dynamically adapt compression level to I/O conditions.
+  --long[=#]                    Enable long distance matching with window log #. [Default: 27]
+  --patch-from=REF              Use REF as the reference point for Zstandard's diff engine.
+
+  -T#                           Spawn # compression threads. [Default: 1; pass 0 for core count.]
+  --single-thread               Share a single thread for I/O and compression (slightly different than `-T1`).
+  --auto-threads={physical|logical}
+                                Use physical/logical cores when using `-T0`. [Default: Physical]
+
+  -B#                           Set job size to #. [Default: 0 (automatic)]
+  --rsyncable                   Compress using a rsync-friendly method (`-B` sets block size).
+
+  --exclude-compressed          Only compress files that are not already compressed.
+
+  --stream-size=#               Specify size of streaming input from STDIN.
+  --size-hint=#                 Optimize compression parameters for streaming input of approximately size #.
+  --target-compressed-block-size=#
+                                Generate compressed blocks of approximately size #.
+
+  --no-dictID                   Don't write `dictID` into the header (dictionary compression only).
+  --[no-]compress-literals      Force (un)compressed literals.
+  --[no-]row-match-finder       Explicitly enable/disable the fast, row-based matchfinder for
+                                the 'greedy', 'lazy', and 'lazy2' strategies.
+
+  --format=zstd                 Compress files to the `.zst` format. [Default]
+  --[no-]mmap-dict              Memory-map dictionary file rather than mallocing and loading all at once.
+  --format=gzip                 Compress files to the `.gz` format.
+  --format=xz                   Compress files to the `.xz` format.
+  --format=lzma                 Compress files to the `.lzma` format.
+  --format=lz4                  Compress files to the `.lz4` format.
+
+Advanced decompression options:
+  -l                            Print information about Zstandard-compressed files.
+  --test                        Test compressed file integrity.
+  -M#                           Set the memory usage limit to # megabytes.
+  --[no-]sparse                 Enable sparse mode. [Default: Enabled for files, disabled for STDOUT.]
+  --[no-]pass-through           Pass through uncompressed files as-is. [Default: Disabled]
+
+Dictionary builder:
+  --train                       Create a dictionary from a training set of files.
+ + + --train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]] + Use the cover algorithm (with optional arguments). + --train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]] + Use the fast cover algorithm (with optional arguments). + + --train-legacy[=s=#] Use the legacy algorithm with selectivity #. [Default: 9] + -o NAME Use NAME as dictionary name. [Default: dictionary] + --maxdict=# Limit dictionary to specified size #. [Default: 112640] + --dictID=# Force dictionary ID to #. [Default: Random] + +Benchmark options: + -b# Perform benchmarking with compression level #. [Default: 3] + -e# Test all compression levels up to #; starting level is `-b#`. [Default: 1] + -i# Set the minimum evaluation time to # seconds. [Default: 3] + -B# Cut file into independent chunks of size #. [Default: No chunking] + -S Output one benchmark result per input file. [Default: Consolidated result] + -D dictionary Benchmark using dictionary. + --priority=rt Set process priority to real-time. ``` ### Passing parameters through Environment Variables @@ -276,7 +325,7 @@ compression speed (for lower levels) with minimal change in compression ratio. The below table illustrates this on the [Silesia compression corpus]. -[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia +[Silesia compression corpus]: https://sun.aei.polsl.pl/~sdeor/index.php?page=silesia | Method | Compression ratio | Compression speed | Decompression speed | |:-------|------------------:|------------------:|---------------------:| diff --git a/programs/benchfn.c b/programs/benchfn.c index 1aadbdd9136..3e042cf38f8 100644 --- a/programs/benchfn.c +++ b/programs/benchfn.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -108,7 +108,6 @@ static BMK_runOutcome_t BMK_setValid_runTime(BMK_runTime_t runTime) BMK_runOutcome_t BMK_benchFunction(BMK_benchParams_t p, unsigned nbLoops) { - size_t dstSize = 0; nbLoops += !nbLoops; /* minimum nbLoops is 1 */ /* init */ @@ -118,7 +117,8 @@ BMK_runOutcome_t BMK_benchFunction(BMK_benchParams_t p, } } /* benchmark */ - { UTIL_time_t const clockStart = UTIL_getTime(); + { size_t dstSize = 0; + UTIL_time_t const clockStart = UTIL_getTime(); unsigned loopNb, blockNb; if (p.initFn != NULL) p.initFn(p.initPayload); for (loopNb = 0; loopNb < nbLoops; loopNb++) { @@ -229,9 +229,9 @@ BMK_runOutcome_t BMK_benchTimedFn(BMK_timedFnState_t* cont, cont->timeSpent_ns += (unsigned long long)loopDuration_ns; /* estimate nbLoops for next run to last approximately 1 second */ - if (loopDuration_ns > (runBudget_ns / 50)) { + if (loopDuration_ns > ((double)runBudget_ns / 50)) { double const fastestRun_ns = MIN(bestRunTime.nanoSecPerRun, newRunTime.nanoSecPerRun); - cont->nbLoops = (unsigned)(runBudget_ns / fastestRun_ns) + 1; + cont->nbLoops = (unsigned)((double)runBudget_ns / fastestRun_ns) + 1; } else { /* previous run was too short : blindly increase workload by x multiplier */ const unsigned multiplier = 10; @@ -239,7 +239,7 @@ BMK_runOutcome_t BMK_benchTimedFn(BMK_timedFnState_t* cont, cont->nbLoops *= multiplier; } - if(loopDuration_ns < runTimeMin_ns) { + if(loopDuration_ns < (double)runTimeMin_ns) { /* don't report results for which benchmark run time was too small : increased risks of rounding errors */ assert(completed == 0); continue; diff --git a/programs/benchfn.h b/programs/benchfn.h index 99d13ac47c1..3fc6e0d0455 100644 --- a/programs/benchfn.h +++ b/programs/benchfn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -15,17 +15,12 @@ * or detecting and returning an error */ -#if defined (__cplusplus) -extern "C" { -#endif - #ifndef BENCH_FN_H_23876 #define BENCH_FN_H_23876 /* === Dependencies === */ #include <stddef.h> /* size_t */ - /* ==== Benchmark any function, iterated on a set of blocks ==== */ /* BMK_runTime_t: valid result return type */ @@ -175,9 +170,4 @@ typedef union { } BMK_timedFnState_shell; BMK_timedFnState_t* BMK_initStatic_timedFnState(void* buffer, size_t size, unsigned total_ms, unsigned run_ms); - #endif /* BENCH_FN_H_23876 */ - -#if defined (__cplusplus) -} -#endif diff --git a/programs/benchzstd.c b/programs/benchzstd.c index 6ceca020c59..f55c8697504 100644 --- a/programs/benchzstd.c +++ b/programs/benchzstd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,197 +8,285 @@ * You may select, at your option, one of the above-listed licenses. 
*/ - /* ************************************** -* Tuning parameters -****************************************/ -#ifndef BMK_TIMETEST_DEFAULT_S /* default minimum time per test */ -# define BMK_TIMETEST_DEFAULT_S 3 + * Tuning parameters + ****************************************/ +#ifndef BMK_TIMETEST_DEFAULT_S /* default minimum time per test */ +# define BMK_TIMETEST_DEFAULT_S 3 #endif - /* ************************************* -* Includes -***************************************/ -#include "platform.h" /* Large Files support */ -#include "util.h" /* UTIL_getFileSize, UTIL_sleep */ -#include <stdlib.h> /* malloc, free */ -#include <string.h> /* memset, strerror */ -#include <stdio.h> /* fprintf, fopen */ -#include <errno.h> -#include <assert.h> /* assert */ + * Includes + ***************************************/ +/* this must be included first */ +#include "platform.h" /* Large Files support, compiler specifics */ -#include "timefn.h" /* UTIL_time_t */ -#include "benchfn.h" +/* then following system includes */ +#include <assert.h> /* assert */ +#include <errno.h> +#include <stdio.h> /* fprintf, fopen */ +#include <stdlib.h> /* malloc, free */ +#include <string.h> /* memset, strerror */ #include "util.h" /* UTIL_getFileSize, UTIL_sleep */ #include "../lib/common/mem.h" +#include "benchfn.h" +#include "timefn.h" /* UTIL_time_t */ #ifndef ZSTD_STATIC_LINKING_ONLY -#define ZSTD_STATIC_LINKING_ONLY +# define ZSTD_STATIC_LINKING_ONLY #endif #include "../lib/zstd.h" -#include "datagen.h" /* RDG_genBuffer */ +#include "datagen.h" /* RDG_genBuffer */ +#include "lorem.h" /* LOREM_genBuffer */ #ifndef XXH_INLINE_ALL -#define XXH_INLINE_ALL +# define XXH_INLINE_ALL #endif #include "../lib/common/xxhash.h" -#include "benchzstd.h" #include "../lib/zstd_errors.h" - +#include "benchzstd.h" /* ************************************* -* Constants -***************************************/ + * Constants + ***************************************/ #ifndef ZSTD_GIT_COMMIT -# define ZSTD_GIT_COMMIT_STRING "" +# define ZSTD_GIT_COMMIT_STRING "" #else -# define ZSTD_GIT_COMMIT_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_GIT_COMMIT) +# define ZSTD_GIT_COMMIT_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_GIT_COMMIT) #endif -#define TIMELOOP_MICROSEC (1*1000000ULL) /* 1 second */ -#define TIMELOOP_NANOSEC (1*1000000000ULL) /* 1 second */ -#define ACTIVEPERIOD_MICROSEC (70*TIMELOOP_MICROSEC) /* 70 seconds */ -#define COOLPERIOD_SEC 10 +#define TIMELOOP_MICROSEC (1 * 1000000ULL) /* 1 second */ +#define TIMELOOP_NANOSEC (1 * 1000000000ULL) /* 1 second */ +#define ACTIVEPERIOD_MICROSEC (70 * TIMELOOP_MICROSEC) /* 70 seconds */ +#define COOLPERIOD_SEC 10 -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) +#define KB *(1 << 10) +#define MB *(1 << 20) +#define GB *(1U << 30) #define BMK_RUNTEST_DEFAULT_MS 1000 -static const size_t maxMemory = (sizeof(size_t)==4) ? - /* 32-bit */ (2 GB - 64 MB) : - /* 64-bit */ (size_t)(1ULL << ((sizeof(size_t)*8)-31)); - +static const size_t maxMemory = (sizeof(size_t) == 4) + ? + /* 32-bit */ (2 GB - 64 MB) + : + /* 64-bit */ (size_t)(1ULL << ((sizeof(size_t) * 8) - 31)); /* ************************************* -* console display -***************************************/ -#define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush(NULL); } -#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } -/* 0 : no display; 1: errors; 2 : + result + interaction + warnings; 3 : + progression; 4 : + information */ -#define OUTPUT(...) { fprintf(stdout, __VA_ARGS__); fflush(NULL); } -#define OUTPUTLEVEL(l, ...) 
if (displayLevel>=l) { OUTPUT(__VA_ARGS__); } - + * console display + ***************************************/ +#define DISPLAY(...) \ + { \ + fprintf(stderr, __VA_ARGS__); \ + fflush(NULL); \ + } +#define DISPLAYLEVEL(l, ...) \ + if (displayLevel >= l) { \ + DISPLAY(__VA_ARGS__); \ + } +/* 0 : no display; 1: errors; 2 : + result + interaction + warnings; 3 : + + * progression; 4 : + information */ +#define OUTPUT(...) \ + { \ + fprintf(stdout, __VA_ARGS__); \ + fflush(NULL); \ + } +#define OUTPUTLEVEL(l, ...) \ + if (displayLevel >= l) { \ + OUTPUT(__VA_ARGS__); \ + } /* ************************************* -* Exceptions -***************************************/ + * Exceptions + ***************************************/ #ifndef DEBUG -# define DEBUG 0 +# define DEBUG 0 #endif -#define DEBUGOUTPUT(...) { if (DEBUG) DISPLAY(__VA_ARGS__); } - -#define RETURN_ERROR_INT(errorNum, ...) { \ - DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "Error %i : ", errorNum); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - return errorNum; \ -} +#define DEBUGOUTPUT(...) \ + { \ + if (DEBUG) \ + DISPLAY(__VA_ARGS__); \ + } + +#define RETURN_ERROR_INT(errorNum, ...) \ + { \ + DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ + DISPLAYLEVEL(1, "Error %i : ", errorNum); \ + DISPLAYLEVEL(1, __VA_ARGS__); \ + DISPLAYLEVEL(1, " \n"); \ + return errorNum; \ + } + +#define CHECK_Z(zf) \ + { \ + size_t const zerr = zf; \ + if (ZSTD_isError(zerr)) { \ + DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ + DISPLAY("Error : "); \ + DISPLAY("%s failed : %s", #zf, ZSTD_getErrorName(zerr)); \ + DISPLAY(" \n"); \ + exit(1); \ + } \ + } -#define CHECK_Z(zf) { \ - size_t const zerr = zf; \ - if (ZSTD_isError(zerr)) { \ - DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ - DISPLAY("Error : "); \ - DISPLAY("%s failed : %s", \ - #zf, ZSTD_getErrorName(zerr)); \ - DISPLAY(" \n"); \ - exit(1); \ - } \ +#define RETURN_ERROR(errorNum, retType, ...) \ + { \ + retType r; \ + memset(&r, 0, sizeof(retType)); \ + DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ + DISPLAYLEVEL(1, "Error %i : ", errorNum); \ + DISPLAYLEVEL(1, __VA_ARGS__); \ + DISPLAYLEVEL(1, " \n"); \ + r.tag = errorNum; \ + return r; \ + } + +static size_t uintSize(unsigned value) +{ + size_t size = 1; + while (value >= 10) { + size++; + value /= 10; + } + return size; } -#define RETURN_ERROR(errorNum, retType, ...) { \ - retType r; \ - memset(&r, 0, sizeof(retType)); \ - DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "Error %i : ", errorNum); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - r.tag = errorNum; \ - return r; \ +/* Note: presume @buffer is large enough */ +static void writeUint_varLen(char* buffer, size_t capacity, unsigned value) +{ + int endPos = (int)uintSize(value) - 1; + assert(uintSize(value) >= 1); + assert(uintSize(value) < capacity); (void)capacity; + while (endPos >= 0) { + char c = '0' + (char)(value % 10); + buffer[endPos--] = c; + value /= 10; + } } +/* replacement for snprintf(), which is not supported by C89. + * sprintf() would be the supported one, but it's labelled unsafe: + * modern static analyzer will flag sprintf() as dangerous, making it unusable. 
+ * formatString_u() replaces snprintf() for the specific case where there is only one %u argument */ +static int formatString_u(char* buffer, size_t buffer_size, const char* formatString, unsigned int value) +{ + size_t const valueSize = uintSize(value); + size_t written = 0; + int i; + + for (i = 0; formatString[i] != '\0' && written < buffer_size - 1; i++) { + if (formatString[i] != '%') { + buffer[written++] = formatString[i]; + continue; + } + + i++; + if (formatString[i] == 'u') { + if (written + valueSize >= buffer_size) abort(); /* buffer not large enough */ + writeUint_varLen(buffer + written, buffer_size - written, value); + written += valueSize; + } else if (formatString[i] == '%') { /* Check for escaped percent sign */ + buffer[written++] = '%'; + } else { + abort(); /* unsupported format */ + } + } + + if (written < buffer_size) { + buffer[written] = '\0'; + } else { + abort(); /* buffer not large enough */ + } + + return (int)written; +} /* ************************************* -* Benchmark Parameters -***************************************/ + * Benchmark Parameters + ***************************************/ -BMK_advancedParams_t BMK_initAdvancedParams(void) { +BMK_advancedParams_t BMK_initAdvancedParams(void) +{ BMK_advancedParams_t const res = { - BMK_both, /* mode */ + BMK_both, /* mode */ BMK_TIMETEST_DEFAULT_S, /* nbSeconds */ - 0, /* blockSize */ - 0, /* nbWorkers */ - 0, /* realTime */ - 0, /* additionalParam */ - 0, /* ldmFlag */ - 0, /* ldmMinMatch */ - 0, /* ldmHashLog */ - 0, /* ldmBuckSizeLog */ - 0, /* ldmHashRateLog */ - ZSTD_ps_auto, /* literalCompressionMode */ - 0 /* useRowMatchFinder */ + 0, /* chunkSizeMax */ + 0, /* targetCBlockSize */ + 0, /* nbWorkers */ + 0, /* realTime */ + 0, /* additionalParam */ + 0, /* ldmFlag */ + 0, /* ldmMinMatch */ + 0, /* ldmHashLog */ + 0, /* ldmBuckSizeLog */ + 0, /* ldmHashRateLog */ + ZSTD_ps_auto, /* literalCompressionMode */ + 0 /* useRowMatchFinder */ }; return res; } - /* ******************************************************** -* Bench functions -**********************************************************/ -typedef struct { - const void* srcPtr; - size_t srcSize; - void* cPtr; - size_t cRoom; - size_t cSize; - void* resPtr; - size_t resSize; -} blockParam_t; - + * Bench functions + **********************************************************/ #undef MIN #undef MAX -#define MIN(a,b) ((a) < (b) ? (a) : (b)) -#define MAX(a,b) ((a) > (b) ? (a) : (b)) - -static void -BMK_initCCtx(ZSTD_CCtx* ctx, - const void* dictBuffer, size_t dictBufferSize, - int cLevel, - const ZSTD_compressionParameters* comprParams, - const BMK_advancedParams_t* adv) +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) + +static void BMK_initCCtx( + ZSTD_CCtx* ctx, + const void* dictBuffer, + size_t dictBufferSize, + int cLevel, + const ZSTD_compressionParameters* comprParams, + const BMK_advancedParams_t* adv) { ZSTD_CCtx_reset(ctx, ZSTD_reset_session_and_parameters); - if (adv->nbWorkers==1) { + if (adv->nbWorkers == 1) { CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_nbWorkers, 0)); } else { CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_nbWorkers, adv->nbWorkers)); } CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_compressionLevel, cLevel)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_useRowMatchFinder, adv->useRowMatchFinder)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_enableLongDistanceMatching, adv->ldmFlag)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_useRowMatchFinder, adv->useRowMatchFinder)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_enableLongDistanceMatching, adv->ldmFlag)); CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_ldmMinMatch, adv->ldmMinMatch)); CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_ldmHashLog, adv->ldmHashLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_ldmBucketSizeLog, adv->ldmBucketSizeLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_ldmHashRateLog, adv->ldmHashRateLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_windowLog, (int)comprParams->windowLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_hashLog, (int)comprParams->hashLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_chainLog, (int)comprParams->chainLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_searchLog, (int)comprParams->searchLog)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_minMatch, (int)comprParams->minMatch)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_targetLength, (int)comprParams->targetLength)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_literalCompressionMode, (int)adv->literalCompressionMode)); - CHECK_Z(ZSTD_CCtx_setParameter(ctx, ZSTD_c_strategy, (int)comprParams->strategy)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_ldmBucketSizeLog, adv->ldmBucketSizeLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_ldmHashRateLog, adv->ldmHashRateLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_windowLog, (int)comprParams->windowLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_hashLog, (int)comprParams->hashLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_chainLog, (int)comprParams->chainLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_searchLog, (int)comprParams->searchLog)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_minMatch, (int)comprParams->minMatch)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_targetLength, (int)comprParams->targetLength)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, + ZSTD_c_literalCompressionMode, + (int)adv->literalCompressionMode)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_strategy, (int)comprParams->strategy)); + CHECK_Z(ZSTD_CCtx_setParameter( + ctx, ZSTD_c_targetCBlockSize, (int)adv->targetCBlockSize)); CHECK_Z(ZSTD_CCtx_loadDictionary(ctx, dictBuffer, dictBufferSize)); } -static void BMK_initDCtx(ZSTD_DCtx* dctx, - const void* dictBuffer, size_t dictBufferSize) { +static void +BMK_initDCtx(ZSTD_DCtx* dctx, const void* dictBuffer, size_t dictBufferSize) +{ CHECK_Z(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters)); CHECK_Z(ZSTD_DCtx_loadDictionary(dctx, dictBuffer, dictBufferSize)); } - typedef struct { ZSTD_CCtx* cctx; const void* dictBuffer; @@ -208,9 +296,16 @@ typedef struct { const BMK_advancedParams_t* adv; } BMK_initCCtxArgs; -static size_t local_initCCtx(void* payload) { +static size_t 
local_initCCtx(void* payload) +{ BMK_initCCtxArgs* ag = (BMK_initCCtxArgs*)payload; - BMK_initCCtx(ag->cctx, ag->dictBuffer, ag->dictBufferSize, ag->cLevel, ag->comprParams, ag->adv); + BMK_initCCtx( + ag->cctx, + ag->dictBuffer, + ag->dictBufferSize, + ag->cLevel, + ag->comprParams, + ag->adv); return 0; } @@ -220,18 +315,20 @@ typedef struct { size_t dictBufferSize; } BMK_initDCtxArgs; -static size_t local_initDCtx(void* payload) { +static size_t local_initDCtx(void* payload) +{ BMK_initDCtxArgs* ag = (BMK_initDCtxArgs*)payload; BMK_initDCtx(ag->dctx, ag->dictBuffer, ag->dictBufferSize); return 0; } - /* `addArgs` is the context */ static size_t local_defaultCompress( - const void* srcBuffer, size_t srcSize, - void* dstBuffer, size_t dstSize, - void* addArgs) + const void* srcBuffer, + size_t srcSize, + void* dstBuffer, + size_t dstSize, + void* addArgs) { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)addArgs; return ZSTD_compress2(cctx, dstBuffer, dstSize, srcBuffer, srcSize); @@ -239,18 +336,24 @@ static size_t local_defaultCompress( /* `addArgs` is the context */ static size_t local_defaultDecompress( - const void* srcBuffer, size_t srcSize, - void* dstBuffer, size_t dstCapacity, - void* addArgs) + const void* srcBuffer, + size_t srcSize, + void* dstBuffer, + size_t dstCapacity, + void* addArgs) { - size_t moreToFlush = 1; + size_t moreToFlush = 1; ZSTD_DCtx* const dctx = (ZSTD_DCtx*)addArgs; ZSTD_inBuffer in; ZSTD_outBuffer out; - in.src = srcBuffer; in.size = srcSize; in.pos = 0; - out.dst = dstBuffer; out.size = dstCapacity; out.pos = 0; + in.src = srcBuffer; + in.size = srcSize; + in.pos = 0; + out.dst = dstBuffer; + out.size = dstCapacity; + out.pos = 0; while (moreToFlush) { - if(out.pos == out.size) { + if (out.pos == out.size) { return (size_t)-ZSTD_error_dstSize_tooSmall; } moreToFlush = ZSTD_decompressStream(dctx, &out, &in); @@ -259,10 +362,8 @@ static size_t local_defaultDecompress( } } return out.pos; - } - /* ================================================================= */ /* Benchmark Zstandard, mem-to-mem scenarios */ /* ================================================================= */ @@ -286,99 +387,146 @@ static BMK_benchOutcome_t BMK_benchOutcome_error(void) return b; } -static BMK_benchOutcome_t BMK_benchOutcome_setValidResult(BMK_benchResult_t result) +static BMK_benchOutcome_t BMK_benchOutcome_setValidResult( + BMK_benchResult_t result) { BMK_benchOutcome_t b; - b.tag = 0; + b.tag = 0; b.internal_never_use_directly = result; return b; } - /* benchMem with no allocation */ -static BMK_benchOutcome_t -BMK_benchMemAdvancedNoAlloc( - const void** srcPtrs, size_t* srcSizes, - void** cPtrs, size_t* cCapacities, size_t* cSizes, - void** resPtrs, size_t* resSizes, - void** resultBufferPtr, void* compressedBuffer, - size_t maxCompressedSize, - BMK_timedFnState_t* timeStateCompress, - BMK_timedFnState_t* timeStateDecompress, - - const void* srcBuffer, size_t srcSize, - const size_t* fileSizes, unsigned nbFiles, - const int cLevel, - const ZSTD_compressionParameters* comprParams, - const void* dictBuffer, size_t dictBufferSize, - ZSTD_CCtx* cctx, ZSTD_DCtx* dctx, - int displayLevel, const char* displayName, - const BMK_advancedParams_t* adv) +static BMK_benchOutcome_t BMK_benchMemAdvancedNoAlloc( + const void** srcPtrs, + size_t* srcSizes, + void** cPtrs, + size_t* cCapacities, + size_t* cSizes, + void** resPtrs, + size_t* resSizes, + void** resultBufferPtr, + void* compressedBuffer, + size_t maxCompressedSize, + BMK_timedFnState_t* timeStateCompress, + BMK_timedFnState_t* 
timeStateDecompress, + + const void* srcBuffer, + size_t srcSize, + const size_t* fileSizes, + unsigned nbFiles, + const int cLevel, + const ZSTD_compressionParameters* comprParams, + const void* dictBuffer, + size_t dictBufferSize, + ZSTD_CCtx* cctx, + ZSTD_DCtx* dctx, + int displayLevel, + const char* displayName, + const BMK_advancedParams_t* adv) { - size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_decodeOnly)) ? adv->blockSize : srcSize) + (!srcSize); /* avoid div by 0 */ + size_t const chunkSizeMax = + ((adv->chunkSizeMax >= 32 && (adv->mode != BMK_decodeOnly)) + ? adv->chunkSizeMax + : srcSize) + + (!srcSize); /* avoid div by 0 */ BMK_benchResult_t benchResult; size_t const loadedCompressedSize = srcSize; - size_t cSize = 0; - double ratio = 0.; - U32 nbBlocks; + size_t cSize = 0; + double ratio = 0.; + U32 nbChunks = 0; - assert(cctx != NULL); assert(dctx != NULL); + assert(cctx != NULL); + assert(dctx != NULL); /* init */ memset(&benchResult, 0, sizeof(benchResult)); - if (strlen(displayName)>17) displayName += strlen(displayName) - 17; /* display last 17 characters */ - if (adv->mode == BMK_decodeOnly) { /* benchmark only decompression : source must be already compressed */ + if (strlen(displayName) > 17) + displayName += + strlen(displayName) - 17; /* display last 17 characters */ + if (adv->mode == BMK_decodeOnly) { + /* benchmark only decompression : source must be already compressed */ const char* srcPtr = (const char*)srcBuffer; - U64 totalDSize64 = 0; + U64 totalDSize64 = 0; U32 fileNb; - for (fileNb=0; fileNb decodedSize) { /* size_t overflow */ + RETURN_ERROR( + 32, + BMK_benchOutcome_t, + "decompressed size is too large for local system"); + } *resultBufferPtr = malloc(decodedSize); if (!(*resultBufferPtr)) { - RETURN_ERROR(33, BMK_benchOutcome_t, "not enough memory"); - } - if (totalDSize64 > decodedSize) { /* size_t overflow */ - free(*resultBufferPtr); - RETURN_ERROR(32, BMK_benchOutcome_t, "original size is too large"); + RETURN_ERROR( + 33, + BMK_benchOutcome_t, + "allocation error: not enough memory"); } - cSize = srcSize; + cSize = srcSize; srcSize = decodedSize; - ratio = (double)srcSize / (double)cSize; + ratio = (double)srcSize / (double)cSize; } } - /* Init data blocks */ - { const char* srcPtr = (const char*)srcBuffer; - char* cPtr = (char*)compressedBuffer; - char* resPtr = (char*)(*resultBufferPtr); - U32 fileNb; - for (nbBlocks=0, fileNb=0; fileNbmode == BMK_decodeOnly) ? 1 : (U32)((remaining + (blockSize-1)) / blockSize); - U32 const blockEnd = nbBlocks + nbBlocksforThisFile; - for ( ; nbBlocksmode == BMK_decodeOnly) ? thisBlockSize : ZSTD_compressBound(thisBlockSize); - resPtrs[nbBlocks] = resPtr; - resSizes[nbBlocks] = (adv->mode == BMK_decodeOnly) ? (size_t) ZSTD_findDecompressedSize(srcPtr, thisBlockSize) : thisBlockSize; - srcPtr += thisBlockSize; - cPtr += cCapacities[nbBlocks]; - resPtr += thisBlockSize; - remaining -= thisBlockSize; + /* Init data chunks */ + { + const char* srcPtr = (const char*)srcBuffer; + char* cPtr = (char*)compressedBuffer; + char* resPtr = (char*)(*resultBufferPtr); + U32 fileNb, chunkID; + for (chunkID = 0, fileNb = 0; fileNb < nbFiles; fileNb++) { + size_t remaining = fileSizes[fileNb]; + U32 const nbChunksforThisFile = (adv->mode == BMK_decodeOnly) + ? 
1 + : (U32)((remaining + (chunkSizeMax - 1)) / chunkSizeMax); + U32 const chunkIdEnd = chunkID + nbChunksforThisFile; + for (; chunkID < chunkIdEnd; chunkID++) { + size_t const chunkSize = MIN(remaining, chunkSizeMax); + srcPtrs[chunkID] = srcPtr; + srcSizes[chunkID] = chunkSize; + cPtrs[chunkID] = cPtr; + cCapacities[chunkID] = (adv->mode == BMK_decodeOnly) + ? chunkSize + : ZSTD_compressBound(chunkSize); + resPtrs[chunkID] = resPtr; + resSizes[chunkID] = (adv->mode == BMK_decodeOnly) + ? (size_t)ZSTD_findDecompressedSize( + srcPtr, chunkSize) + : chunkSize; + srcPtr += chunkSize; + cPtr += cCapacities[chunkID]; + resPtr += chunkSize; + remaining -= chunkSize; if (adv->mode == BMK_decodeOnly) { - cSizes[nbBlocks] = thisBlockSize; - benchResult.cSize = thisBlockSize; - } } } } + cSizes[chunkID] = chunkSize; + benchResult.cSize = chunkSize; + } + } + } + nbChunks = chunkID; + } /* warming up `compressedBuffer` */ if (adv->mode == BMK_decodeOnly) { @@ -387,240 +535,328 @@ BMK_benchMemAdvancedNoAlloc( RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1); } -#if defined(UTIL_TIME_USES_C90_CLOCK) - if (adv->nbWorkers > 1) { - OUTPUTLEVEL(2, "Warning : time measurements restricted to C90 clock_t. \n") - OUTPUTLEVEL(2, "Warning : using C90 clock_t leads to incorrect measurements in multithreading mode. \n") + if (!UTIL_support_MT_measurements() && adv->nbWorkers > 1) { + OUTPUTLEVEL( + 2, + "Warning : time measurements may be incorrect in multithreading mode... \n") } -#endif /* Bench */ - { U64 const crcOrig = (adv->mode == BMK_decodeOnly) ? 0 : XXH64(srcBuffer, srcSize, 0); -# define NB_MARKS 4 + { + U64 const crcOrig = (adv->mode == BMK_decodeOnly) + ? 0 + : XXH64(srcBuffer, srcSize, 0); +#define NB_MARKS 4 const char* marks[NB_MARKS] = { " |", " /", " =", " \\" }; - U32 markNb = 0; - int compressionCompleted = (adv->mode == BMK_decodeOnly); - int decompressionCompleted = (adv->mode == BMK_compressOnly); + U32 markNb = 0; + int compressionCompleted = (adv->mode == BMK_decodeOnly); + int decompressionCompleted = (adv->mode == BMK_compressOnly); BMK_benchParams_t cbp, dbp; BMK_initCCtxArgs cctxprep; BMK_initDCtxArgs dctxprep; - cbp.benchFn = local_defaultCompress; /* ZSTD_compress2 */ - cbp.benchPayload = cctx; - cbp.initFn = local_initCCtx; /* BMK_initCCtx */ - cbp.initPayload = &cctxprep; - cbp.errorFn = ZSTD_isError; - cbp.blockCount = nbBlocks; - cbp.srcBuffers = srcPtrs; - cbp.srcSizes = srcSizes; - cbp.dstBuffers = cPtrs; + cbp.benchFn = local_defaultCompress; /* ZSTD_compress2 */ + cbp.benchPayload = cctx; + cbp.initFn = local_initCCtx; /* BMK_initCCtx */ + cbp.initPayload = &cctxprep; + cbp.errorFn = ZSTD_isError; + cbp.blockCount = nbChunks; + cbp.srcBuffers = srcPtrs; + cbp.srcSizes = srcSizes; + cbp.dstBuffers = cPtrs; cbp.dstCapacities = cCapacities; - cbp.blockResults = cSizes; + cbp.blockResults = cSizes; - cctxprep.cctx = cctx; - cctxprep.dictBuffer = dictBuffer; + cctxprep.cctx = cctx; + cctxprep.dictBuffer = dictBuffer; cctxprep.dictBufferSize = dictBufferSize; - cctxprep.cLevel = cLevel; - cctxprep.comprParams = comprParams; - cctxprep.adv = adv; - - dbp.benchFn = local_defaultDecompress; - dbp.benchPayload = dctx; - dbp.initFn = local_initDCtx; - dbp.initPayload = &dctxprep; - dbp.errorFn = ZSTD_isError; - dbp.blockCount = nbBlocks; - dbp.srcBuffers = (const void* const *) cPtrs; - dbp.srcSizes = cSizes; - dbp.dstBuffers = resPtrs; + cctxprep.cLevel = cLevel; + cctxprep.comprParams = comprParams; + cctxprep.adv = adv; + + dbp.benchFn = local_defaultDecompress; + 
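/* dbp mirrors cbp, but benchmarks the decompression pass: its sources are the freshly compressed chunks (cPtrs/cSizes), regenerated into resPtrs */ +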
dbp.benchPayload = dctx; + dbp.initFn = local_initDCtx; + dbp.initPayload = &dctxprep; + dbp.errorFn = ZSTD_isError; + dbp.blockCount = nbChunks; + dbp.srcBuffers = (const void* const*)cPtrs; + dbp.srcSizes = cSizes; + dbp.dstBuffers = resPtrs; dbp.dstCapacities = resSizes; - dbp.blockResults = NULL; + dbp.blockResults = NULL; - dctxprep.dctx = dctx; - dctxprep.dictBuffer = dictBuffer; + dctxprep.dctx = dctx; + dctxprep.dictBuffer = dictBuffer; dctxprep.dictBufferSize = dictBufferSize; - OUTPUTLEVEL(2, "\r%70s\r", ""); /* blank line */ + OUTPUTLEVEL(2, "\r%70s\r", ""); /* blank line */ assert(srcSize < UINT_MAX); - OUTPUTLEVEL(2, "%2s-%-17.17s :%10u -> \r", marks[markNb], displayName, (unsigned)srcSize); + OUTPUTLEVEL( + 2, + "%2s-%-17.17s :%10u -> \r", + marks[markNb], + displayName, + (unsigned)srcSize); while (!(compressionCompleted && decompressionCompleted)) { if (!compressionCompleted) { - BMK_runOutcome_t const cOutcome = BMK_benchTimedFn( timeStateCompress, cbp); + BMK_runOutcome_t const cOutcome = + BMK_benchTimedFn(timeStateCompress, cbp); if (!BMK_isSuccessful_runOutcome(cOutcome)) { - return BMK_benchOutcome_error(); + RETURN_ERROR(30, BMK_benchOutcome_t, "compression error"); } - { BMK_runTime_t const cResult = BMK_extract_runTime(cOutcome); - cSize = cResult.sumOfReturn; + { + BMK_runTime_t const cResult = BMK_extract_runTime(cOutcome); + cSize = cResult.sumOfReturn; ratio = (double)srcSize / (double)cSize; - { BMK_benchResult_t newResult; - newResult.cSpeed = (U64)((double)srcSize * TIMELOOP_NANOSEC / cResult.nanoSecPerRun); + { + BMK_benchResult_t newResult; + newResult.cSpeed = + (U64)((double)srcSize * TIMELOOP_NANOSEC + / cResult.nanoSecPerRun); benchResult.cSize = cSize; if (newResult.cSpeed > benchResult.cSpeed) benchResult.cSpeed = newResult.cSpeed; - } } + } + } - { int const ratioAccuracy = (ratio < 10.) ? 3 : 2; + { + int const ratioDigits = 1 + (ratio < 100.) + (ratio < 10.); assert(cSize < UINT_MAX); - OUTPUTLEVEL(2, "%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s \r", - marks[markNb], displayName, - (unsigned)srcSize, (unsigned)cSize, - ratioAccuracy, ratio, - benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT); + OUTPUTLEVEL( + 2, + "%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s \r", + marks[markNb], + displayName, + (unsigned)srcSize, + (unsigned)cSize, + ratioDigits, + ratio, + benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1, + (double)benchResult.cSpeed / MB_UNIT); } - compressionCompleted = BMK_isCompleted_TimedFn(timeStateCompress); + compressionCompleted = + BMK_isCompleted_TimedFn(timeStateCompress); } - if(!decompressionCompleted) { - BMK_runOutcome_t const dOutcome = BMK_benchTimedFn(timeStateDecompress, dbp); + if (!decompressionCompleted) { + BMK_runOutcome_t const dOutcome = + BMK_benchTimedFn(timeStateDecompress, dbp); - if(!BMK_isSuccessful_runOutcome(dOutcome)) { - return BMK_benchOutcome_error(); + if (!BMK_isSuccessful_runOutcome(dOutcome)) { + RETURN_ERROR(30, BMK_benchOutcome_t, "decompression error"); } - { BMK_runTime_t const dResult = BMK_extract_runTime(dOutcome); - U64 const newDSpeed = (U64)((double)srcSize * TIMELOOP_NANOSEC / dResult.nanoSecPerRun); + { + BMK_runTime_t const dResult = BMK_extract_runTime(dOutcome); + U64 const newDSpeed = + (U64)((double)srcSize * TIMELOOP_NANOSEC + / dResult.nanoSecPerRun); if (newDSpeed > benchResult.dSpeed) benchResult.dSpeed = newDSpeed; } - { int const ratioAccuracy = (ratio < 10.) ? 
3 : 2; - OUTPUTLEVEL(2, "%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s, %6.1f MB/s\r", - marks[markNb], displayName, - (unsigned)srcSize, (unsigned)cSize, - ratioAccuracy, ratio, - benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT, + { + int const ratioDigits = 1 + (ratio < 100.) + (ratio < 10.); + OUTPUTLEVEL( + 2, + "%2s-%-17.17s :%10u ->%10u (x%5.*f), %6.*f MB/s, %6.1f MB/s\r", + marks[markNb], + displayName, + (unsigned)srcSize, + (unsigned)cSize, + ratioDigits, + ratio, + benchResult.cSpeed < (10 * MB_UNIT) ? 2 : 1, + (double)benchResult.cSpeed / MB_UNIT, (double)benchResult.dSpeed / MB_UNIT); } - decompressionCompleted = BMK_isCompleted_TimedFn(timeStateDecompress); + decompressionCompleted = + BMK_isCompleted_TimedFn(timeStateDecompress); } - markNb = (markNb+1) % NB_MARKS; - } /* while (!(compressionCompleted && decompressionCompleted)) */ + markNb = (markNb + 1) % NB_MARKS; + } /* while (!(compressionCompleted && decompressionCompleted)) */ /* CRC Checking */ { const BYTE* resultBuffer = (const BYTE*)(*resultBufferPtr); - U64 const crcCheck = XXH64(resultBuffer, srcSize, 0); - if ((adv->mode == BMK_both) && (crcOrig!=crcCheck)) { + U64 const crcCheck = XXH64(resultBuffer, srcSize, 0); + if ((adv->mode == BMK_both) && (crcOrig != crcCheck)) { size_t u; DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", - displayName, (unsigned)crcOrig, (unsigned)crcCheck); - for (u=0; u u) break; + for (segNb = 0; segNb < nbChunks; segNb++) { + if (bacc + srcSizes[segNb] > u) + break; bacc += srcSizes[segNb]; } pos = (U32)(u - bacc); bNb = pos / (128 KB); - DISPLAY("(sample %u, block %u, pos %u) \n", segNb, bNb, pos); - { size_t const lowest = (u>5) ? 5 : u; + DISPLAY("(sample %u, chunk %u, pos %u) \n", + segNb, + bNb, + pos); + { + size_t const lowest = (u > 5) ? 
5 : u; size_t n; DISPLAY("origin: "); - for (n=lowest; n>0; n--) - DISPLAY("%02X ", ((const BYTE*)srcBuffer)[u-n]); + for (n = lowest; n > 0; n--) + DISPLAY("%02X ", + ((const BYTE*)srcBuffer)[u - n]); DISPLAY(" :%02X: ", ((const BYTE*)srcBuffer)[u]); - for (n=1; n<3; n++) - DISPLAY("%02X ", ((const BYTE*)srcBuffer)[u+n]); + for (n = 1; n < 3; n++) + DISPLAY("%02X ", + ((const BYTE*)srcBuffer)[u + n]); DISPLAY(" \n"); DISPLAY("decode: "); - for (n=lowest; n>0; n--) - DISPLAY("%02X ", resultBuffer[u-n]); + for (n = lowest; n > 0; n--) + DISPLAY("%02X ", resultBuffer[u - n]); DISPLAY(" :%02X: ", resultBuffer[u]); - for (n=1; n<3; n++) - DISPLAY("%02X ", resultBuffer[u+n]); + for (n = 1; n < 3; n++) + DISPLAY("%02X ", resultBuffer[u + n]); DISPLAY(" \n"); } break; } - if (u==srcSize-1) { /* should never happen */ + if (u == srcSize - 1) { /* should never happen */ DISPLAY("no difference detected\n"); } - } /* for (u=0; umode == BMK_both) && (crcOrig!=crcCheck)) */ - } /* CRC Checking */ + } /* for (u=0; umode == BMK_both) && (crcOrig!=crcCheck)) */ + } /* CRC Checking */ - if (displayLevel == 1) { /* hidden display mode -q, used by python speed benchmark */ + if (displayLevel + == 1) { /* hidden display mode -q, used by python speed benchmark */ double const cSpeed = (double)benchResult.cSpeed / MB_UNIT; double const dSpeed = (double)benchResult.dSpeed / MB_UNIT; if (adv->additionalParam) { - OUTPUT("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, adv->additionalParam); + OUTPUT("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", + cLevel, + (int)cSize, + ratio, + cSpeed, + dSpeed, + displayName, + adv->additionalParam); } else { - OUTPUT("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName); + OUTPUT("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", + cLevel, + (int)cSize, + ratio, + cSpeed, + dSpeed, + displayName); } } OUTPUTLEVEL(2, "%2i#\n", cLevel); - } /* Bench */ + } /* Bench */ - benchResult.cMem = (1ULL << (comprParams->windowLog)) + ZSTD_sizeof_CCtx(cctx); + benchResult.cMem = + (1ULL << (comprParams->windowLog)) + ZSTD_sizeof_CCtx(cctx); return BMK_benchOutcome_setValidResult(benchResult); } -BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize, - void* dstBuffer, size_t dstCapacity, - const size_t* fileSizes, unsigned nbFiles, - int cLevel, const ZSTD_compressionParameters* comprParams, - const void* dictBuffer, size_t dictBufferSize, - int displayLevel, const char* displayName, const BMK_advancedParams_t* adv) +BMK_benchOutcome_t BMK_benchMemAdvanced( + const void* srcBuffer, + size_t srcSize, + void* dstBuffer, + size_t dstCapacity, + const size_t* fileSizes, + unsigned nbFiles, + int cLevel, + const ZSTD_compressionParameters* comprParams, + const void* dictBuffer, + size_t dictBufferSize, + int displayLevel, + const char* displayName, + const BMK_advancedParams_t* adv) { - int const dstParamsError = !dstBuffer ^ !dstCapacity; /* must be both NULL or none */ + int const dstParamsError = + !dstBuffer ^ !dstCapacity; /* must be both NULL or none */ - size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_decodeOnly)) ? adv->blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ; - U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles; + size_t const chunkSize = + ((adv->chunkSizeMax >= 32 && (adv->mode != BMK_decodeOnly)) + ? 
adv->chunkSizeMax + : srcSize) + + (!srcSize) /* avoid div by 0 */; + U32 const nbChunksMax = + (U32)((srcSize + (chunkSize - 1)) / chunkSize) + nbFiles; - /* these are the blockTable parameters, just split up */ - const void ** const srcPtrs = (const void**)malloc(maxNbBlocks * sizeof(void*)); - size_t* const srcSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t)); + const void** const srcPtrs = + (const void**)malloc(nbChunksMax * sizeof(void*)); + size_t* const srcSizes = (size_t*)malloc(nbChunksMax * sizeof(size_t)); + void** const cPtrs = (void**)malloc(nbChunksMax * sizeof(void*)); + size_t* const cSizes = (size_t*)malloc(nbChunksMax * sizeof(size_t)); + size_t* const cCapacities = (size_t*)malloc(nbChunksMax * sizeof(size_t)); - void ** const cPtrs = (void**)malloc(maxNbBlocks * sizeof(void*)); - size_t* const cSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t)); - size_t* const cCapacities = (size_t*)malloc(maxNbBlocks * sizeof(size_t)); + void** const resPtrs = (void**)malloc(nbChunksMax * sizeof(void*)); + size_t* const resSizes = (size_t*)malloc(nbChunksMax * sizeof(size_t)); - void ** const resPtrs = (void**)malloc(maxNbBlocks * sizeof(void*)); - size_t* const resSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t)); - - BMK_timedFnState_t* timeStateCompress = BMK_createTimedFnState(adv->nbSeconds * 1000, BMK_RUNTEST_DEFAULT_MS); - BMK_timedFnState_t* timeStateDecompress = BMK_createTimedFnState(adv->nbSeconds * 1000, BMK_RUNTEST_DEFAULT_MS); + BMK_timedFnState_t* timeStateCompress = BMK_createTimedFnState( + adv->nbSeconds * 1000, BMK_RUNTEST_DEFAULT_MS); + BMK_timedFnState_t* timeStateDecompress = BMK_createTimedFnState( + adv->nbSeconds * 1000, BMK_RUNTEST_DEFAULT_MS); ZSTD_CCtx* const cctx = ZSTD_createCCtx(); ZSTD_DCtx* const dctx = ZSTD_createDCtx(); - const size_t maxCompressedSize = dstCapacity ? dstCapacity : ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024); + const size_t maxCompressedSize = dstCapacity + ? dstCapacity + : ZSTD_compressBound(srcSize) + (nbChunksMax * 1024); - void* const internalDstBuffer = dstBuffer ? NULL : malloc(maxCompressedSize); + void* const internalDstBuffer = + dstBuffer ? NULL : malloc(maxCompressedSize); void* const compressedBuffer = dstBuffer ? dstBuffer : internalDstBuffer; - BMK_benchOutcome_t outcome = BMK_benchOutcome_error(); /* error by default */ + BMK_benchOutcome_t outcome = + BMK_benchOutcome_error(); /* error by default */ void* resultBuffer = srcSize ? 
malloc(srcSize) : NULL; - int allocationincomplete = !srcPtrs || !srcSizes || !cPtrs || - !cSizes || !cCapacities || !resPtrs || !resSizes || - !timeStateCompress || !timeStateDecompress || - !cctx || !dctx || - !compressedBuffer || !resultBuffer; - + int const allocationincomplete = !srcPtrs || !srcSizes || !cPtrs || !cSizes + || !cCapacities || !resPtrs || !resSizes || !timeStateCompress + || !timeStateDecompress || !cctx || !dctx || !compressedBuffer + || !resultBuffer; if (!allocationincomplete && !dstParamsError) { - outcome = BMK_benchMemAdvancedNoAlloc(srcPtrs, srcSizes, - cPtrs, cCapacities, cSizes, - resPtrs, resSizes, - &resultBuffer, - compressedBuffer, maxCompressedSize, - timeStateCompress, timeStateDecompress, - srcBuffer, srcSize, - fileSizes, nbFiles, - cLevel, comprParams, - dictBuffer, dictBufferSize, - cctx, dctx, - displayLevel, displayName, adv); + outcome = BMK_benchMemAdvancedNoAlloc( + srcPtrs, + srcSizes, + cPtrs, + cCapacities, + cSizes, + resPtrs, + resSizes, + &resultBuffer, + compressedBuffer, + maxCompressedSize, + timeStateCompress, + timeStateDecompress, + srcBuffer, + srcSize, + fileSizes, + nbFiles, + cLevel, + comprParams, + dictBuffer, + dictBufferSize, + cctx, + dctx, + displayLevel, + displayName, + adv); } /* clean up */ @@ -641,104 +877,167 @@ BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize, free(resPtrs); free(resSizes); - if(allocationincomplete) { - RETURN_ERROR(31, BMK_benchOutcome_t, "allocation error : not enough memory"); + if (allocationincomplete) { + RETURN_ERROR( + 31, BMK_benchOutcome_t, "allocation error : not enough memory"); } - if(dstParamsError) { + if (dstParamsError) { RETURN_ERROR(32, BMK_benchOutcome_t, "Dst parameters not coherent"); } return outcome; } -BMK_benchOutcome_t BMK_benchMem(const void* srcBuffer, size_t srcSize, - const size_t* fileSizes, unsigned nbFiles, - int cLevel, const ZSTD_compressionParameters* comprParams, - const void* dictBuffer, size_t dictBufferSize, - int displayLevel, const char* displayName) { - +BMK_benchOutcome_t BMK_benchMem( + const void* srcBuffer, + size_t srcSize, + const size_t* fileSizes, + unsigned nbFiles, + int cLevel, + const ZSTD_compressionParameters* comprParams, + const void* dictBuffer, + size_t dictBufferSize, + int displayLevel, + const char* displayName) +{ BMK_advancedParams_t const adv = BMK_initAdvancedParams(); - return BMK_benchMemAdvanced(srcBuffer, srcSize, - NULL, 0, - fileSizes, nbFiles, - cLevel, comprParams, - dictBuffer, dictBufferSize, - displayLevel, displayName, &adv); + return BMK_benchMemAdvanced( + srcBuffer, + srcSize, + NULL, + 0, + fileSizes, + nbFiles, + cLevel, + comprParams, + dictBuffer, + dictBufferSize, + displayLevel, + displayName, + &adv); } -static BMK_benchOutcome_t BMK_benchCLevel(const void* srcBuffer, size_t benchedSize, - const size_t* fileSizes, unsigned nbFiles, - int cLevel, const ZSTD_compressionParameters* comprParams, - const void* dictBuffer, size_t dictBufferSize, - int displayLevel, const char* displayName, - BMK_advancedParams_t const * const adv) +/* @return: 0 on success, !0 if error */ +static int BMK_benchCLevels( + const void* srcBuffer, + size_t benchedSize, + const size_t* fileSizes, + unsigned nbFiles, + int startCLevel, int endCLevel, + const ZSTD_compressionParameters* comprParams, + const void* dictBuffer, + size_t dictBufferSize, + int displayLevel, + const char* displayName, + BMK_advancedParams_t const* const adv) { + int level; const char* pch = strrchr(displayName, '\\'); /* Windows */ - 
if (!pch) pch = strrchr(displayName, '/'); /* Linux */ - if (pch) displayName = pch+1; + if (!pch) + pch = strrchr(displayName, '/'); /* Linux */ + if (pch) + displayName = pch + 1; + + if (endCLevel > ZSTD_maxCLevel()) { + DISPLAYLEVEL(1, "Invalid Compression Level \n"); + return 15; + } + if (endCLevel < startCLevel) { + DISPLAYLEVEL(1, "Invalid Compression Level Range \n"); + return 15; + } if (adv->realTime) { DISPLAYLEVEL(2, "Note : switching to real-time priority \n"); SET_REALTIME_PRIORITY; } - if (displayLevel == 1 && !adv->additionalParam) /* --quiet mode */ - OUTPUT("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", - ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, - (unsigned)benchedSize, adv->nbSeconds, (unsigned)(adv->blockSize>>10)); - - return BMK_benchMemAdvanced(srcBuffer, benchedSize, - NULL, 0, - fileSizes, nbFiles, - cLevel, comprParams, - dictBuffer, dictBufferSize, - displayLevel, displayName, adv); + if (displayLevel == 1 && !adv->additionalParam) /* --quiet mode */ + OUTPUT("bench %s %s: input %u bytes, %u seconds, %u KB chunks\n", + ZSTD_VERSION_STRING, + ZSTD_GIT_COMMIT_STRING, + (unsigned)benchedSize, + adv->nbSeconds, + (unsigned)(adv->chunkSizeMax >> 10)); + + for (level = startCLevel; level <= endCLevel; level++) { + BMK_benchOutcome_t res = BMK_benchMemAdvanced( + srcBuffer, + benchedSize, + NULL, + 0, + fileSizes, + nbFiles, + level, + comprParams, + dictBuffer, + dictBufferSize, + displayLevel, + displayName, + adv); + if (!BMK_isSuccessful_benchOutcome(res)) return 1; + } + return 0; } -BMK_benchOutcome_t BMK_syntheticTest(int cLevel, double compressibility, - const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv) +int BMK_syntheticTest( + double compressibility, + int startingCLevel, int endCLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, + const BMK_advancedParams_t* adv) { - char name[20] = {0}; - size_t const benchedSize = 10000000; - void* srcBuffer; - BMK_benchOutcome_t res; - - if (cLevel > ZSTD_maxCLevel()) { - RETURN_ERROR(15, BMK_benchOutcome_t, "Invalid Compression Level"); - } + char nameBuff[20] = { 0 }; + const char* name = nameBuff; + size_t const benchedSize = adv->chunkSizeMax ? adv->chunkSizeMax : 10000000; /* Memory allocation */ - srcBuffer = malloc(benchedSize); - if (!srcBuffer) RETURN_ERROR(21, BMK_benchOutcome_t, "not enough memory"); + void* const srcBuffer = malloc(benchedSize); + if (!srcBuffer) { + DISPLAYLEVEL(1, "allocation error : not enough memory \n"); + return 16; + } /* Fill input buffer */ - RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0); + if (compressibility < 0.0) { + LOREM_genBuffer(srcBuffer, benchedSize, 0); + name = "Lorem ipsum"; + } else { + RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0); + formatString_u( + nameBuff, + sizeof(nameBuff), + "Synthetic %u%%", + (unsigned)(compressibility * 100)); + } /* Bench */ - snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100)); - res = BMK_benchCLevel(srcBuffer, benchedSize, - &benchedSize /* ? */, 1 /* ? 
*/, - cLevel, compressionParams, - NULL, 0, /* dictionary */ - displayLevel, name, adv); - - /* clean up */ - free(srcBuffer); - - return res; + { int res = BMK_benchCLevels( + srcBuffer, + benchedSize, + &benchedSize, + 1, + startingCLevel, endCLevel, + compressionParams, + NULL, + 0, /* dictionary */ + displayLevel, + name, + adv); + free(srcBuffer); + return res; + } } - - static size_t BMK_findMaxMem(U64 requiredMem) { size_t const step = 64 MB; - BYTE* testmem = NULL; + BYTE* testmem = NULL; requiredMem = (((requiredMem >> 26) + 1) << 26); requiredMem += step; - if (requiredMem > maxMemory) requiredMem = maxMemory; + if (requiredMem > maxMemory) + requiredMem = maxMemory; do { testmem = (BYTE*)malloc((size_t)requiredMem); @@ -752,133 +1051,197 @@ static size_t BMK_findMaxMem(U64 requiredMem) /*! BMK_loadFiles() : * Loads `buffer` with content of files listed within `fileNamesTable`. * At most, fills `buffer` entirely. */ -static int BMK_loadFiles(void* buffer, size_t bufferSize, - size_t* fileSizes, - const char* const * fileNamesTable, unsigned nbFiles, - int displayLevel) +static int BMK_loadFiles( + void* buffer, + size_t bufferSize, + size_t* fileSizes, + const char* const* fileNamesTable, + unsigned nbFiles, + int displayLevel) { size_t pos = 0, totalSize = 0; unsigned n; - for (n=0; n bufferSize-pos) fileSize = bufferSize-pos, nbFiles=n; /* buffer too small - stop after this file */ - { size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f); - if (readSize != (size_t)fileSize) RETURN_ERROR_INT(11, "could not read %s", fileNamesTable[n]); + if (fileSize > bufferSize - pos) { + /* buffer too small - limit quantity loaded */ + fileSize = bufferSize - pos; + nbFiles = n; /* stop after this file */ + } + + { FILE* const f = fopen(filename, "rb"); + if (f == NULL) { + RETURN_ERROR_INT( + 10, "cannot open file %s", filename); + } + OUTPUTLEVEL(2, "Loading %s... 
\r", filename); + { size_t const readSize = + fread(((char*)buffer) + pos, 1, (size_t)fileSize, f); + if (readSize != (size_t)fileSize) { + fclose(f); + RETURN_ERROR_INT( + 11, "invalid read %s", filename); + } pos += readSize; } fileSizes[n] = (size_t)fileSize; totalSize += (size_t)fileSize; fclose(f); - } } + } + } - if (totalSize == 0) RETURN_ERROR_INT(12, "no data to bench"); + if (totalSize == 0) + RETURN_ERROR_INT(12, "no data to bench"); return 0; } -BMK_benchOutcome_t BMK_benchFilesAdvanced( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, int cLevel, - const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv) +int BMK_benchFilesAdvanced( + const char* const* fileNamesTable, + unsigned nbFiles, + const char* dictFileName, + int startCLevel, int endCLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, + const BMK_advancedParams_t* adv) { void* srcBuffer = NULL; size_t benchedSize; - void* dictBuffer = NULL; + void* dictBuffer = NULL; size_t dictBufferSize = 0; - size_t* fileSizes = NULL; - BMK_benchOutcome_t res; + size_t* fileSizes = NULL; + int res = 1; U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles); if (!nbFiles) { - RETURN_ERROR(14, BMK_benchOutcome_t, "No Files to Benchmark"); + DISPLAYLEVEL(1, "No Files to Benchmark"); + return 13; } - if (cLevel > ZSTD_maxCLevel()) { - RETURN_ERROR(15, BMK_benchOutcome_t, "Invalid Compression Level"); + if (endCLevel > ZSTD_maxCLevel()) { + DISPLAYLEVEL(1, "Invalid Compression Level"); + return 14; } if (totalSizeToLoad == UTIL_FILESIZE_UNKNOWN) { - RETURN_ERROR(9, BMK_benchOutcome_t, "Error loading files"); + DISPLAYLEVEL(1, "Error loading files"); + return 15; } fileSizes = (size_t*)calloc(nbFiles, sizeof(size_t)); - if (!fileSizes) RETURN_ERROR(12, BMK_benchOutcome_t, "not enough memory for fileSizes"); + if (!fileSizes) { + DISPLAYLEVEL(1, "not enough memory for fileSizes"); + return 16; + } /* Load dictionary */ if (dictFileName != NULL) { U64 const dictFileSize = UTIL_getFileSize(dictFileName); if (dictFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYLEVEL(1, "error loading %s : %s \n", dictFileName, strerror(errno)); + DISPLAYLEVEL( + 1, + "error loading %s : %s \n", + dictFileName, + strerror(errno)); free(fileSizes); - RETURN_ERROR(9, BMK_benchOutcome_t, "benchmark aborted"); + DISPLAYLEVEL(1, "benchmark aborted"); + return 17; } if (dictFileSize > 64 MB) { free(fileSizes); - RETURN_ERROR(10, BMK_benchOutcome_t, "dictionary file %s too large", dictFileName); + DISPLAYLEVEL(1, "dictionary file %s too large", dictFileName); + return 18; } dictBufferSize = (size_t)dictFileSize; - dictBuffer = malloc(dictBufferSize); - if (dictBuffer==NULL) { + dictBuffer = malloc(dictBufferSize); + if (dictBuffer == NULL) { free(fileSizes); - RETURN_ERROR(11, BMK_benchOutcome_t, "not enough memory for dictionary (%u bytes)", - (unsigned)dictBufferSize); + DISPLAYLEVEL( + 1, + "not enough memory for dictionary (%u bytes)", + (unsigned)dictBufferSize); + return 19; } - { int const errorCode = BMK_loadFiles(dictBuffer, dictBufferSize, - fileSizes, &dictFileName /*?*/, - 1 /*?*/, displayLevel); + { + int const errorCode = BMK_loadFiles( + dictBuffer, + dictBufferSize, + fileSizes, + &dictFileName /*?*/, + 1 /*?*/, + displayLevel); if (errorCode) { - res = BMK_benchOutcome_error(); goto _cleanUp; - } } + } + } } /* Memory allocation & restrictions */ benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3; - if 
((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad; + if ((U64)benchedSize > totalSizeToLoad) + benchedSize = (size_t)totalSizeToLoad; if (benchedSize < totalSizeToLoad) - DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20)); + DISPLAY("Not enough memory; testing %u MB only...\n", + (unsigned)(benchedSize >> 20)); srcBuffer = benchedSize ? malloc(benchedSize) : NULL; if (!srcBuffer) { free(dictBuffer); free(fileSizes); - RETURN_ERROR(12, BMK_benchOutcome_t, "not enough memory"); + DISPLAYLEVEL(1, "not enough memory for srcBuffer"); + return 20; } /* Load input buffer */ - { int const errorCode = BMK_loadFiles(srcBuffer, benchedSize, - fileSizes, fileNamesTable, nbFiles, - displayLevel); + { + int const errorCode = BMK_loadFiles( + srcBuffer, + benchedSize, + fileSizes, + fileNamesTable, + nbFiles, + displayLevel); if (errorCode) { - res = BMK_benchOutcome_error(); goto _cleanUp; - } } + } + } /* Bench */ - { char mfName[20] = {0}; - snprintf (mfName, sizeof(mfName), " %u files", nbFiles); - { const char* const displayName = (nbFiles > 1) ? mfName : fileNamesTable[0]; - res = BMK_benchCLevel(srcBuffer, benchedSize, - fileSizes, nbFiles, - cLevel, compressionParams, - dictBuffer, dictBufferSize, - displayLevel, displayName, - adv); - } } + { + char mfName[20] = { 0 }; + formatString_u(mfName, sizeof(mfName), " %u files", nbFiles); + { const char* const displayName = + (nbFiles > 1) ? mfName : fileNamesTable[0]; + res = BMK_benchCLevels( + srcBuffer, + benchedSize, + fileSizes, + nbFiles, + startCLevel, endCLevel, + compressionParams, + dictBuffer, + dictBufferSize, + displayLevel, + displayName, + adv); + } + } _cleanUp: free(srcBuffer); @@ -887,13 +1250,21 @@ BMK_benchOutcome_t BMK_benchFilesAdvanced( return res; } - -BMK_benchOutcome_t BMK_benchFiles( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, - int cLevel, const ZSTD_compressionParameters* compressionParams, - int displayLevel) +int BMK_benchFiles( + const char* const* fileNamesTable, + unsigned nbFiles, + const char* dictFileName, + int cLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel) { BMK_advancedParams_t const adv = BMK_initAdvancedParams(); - return BMK_benchFilesAdvanced(fileNamesTable, nbFiles, dictFileName, cLevel, compressionParams, displayLevel, &adv); + return BMK_benchFilesAdvanced( + fileNamesTable, + nbFiles, + dictFileName, + cLevel, cLevel, + compressionParams, + displayLevel, + &adv); } diff --git a/programs/benchzstd.h b/programs/benchzstd.h index 11ac85da7f9..d62a33c0aec 100644 --- a/programs/benchzstd.h +++ b/programs/benchzstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,10 +14,6 @@ * and display progress result and final summary */ -#if defined (__cplusplus) -extern "C" { -#endif - #ifndef BENCH_ZSTD_H_3242387 #define BENCH_ZSTD_H_3242387 @@ -26,7 +22,6 @@ extern "C" { #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ #include "../lib/zstd.h" /* ZSTD_compressionParameters */ - /* === Constants === */ #define MB_UNIT 1000000 @@ -81,21 +76,13 @@ BMK_benchResult_t BMK_extract_benchResult(BMK_benchOutcome_t outcome); * 2 : + result + interaction + warnings; * 3 : + information; * 4 : + debug - * @return: - * a variant, which expresses either an error, or a valid result. 
- * Use BMK_isSuccessful_benchOutcome() to check if function was successful. - * If yes, extract the valid result with BMK_extract_benchResult(), - * it will contain : - * .cSpeed: compression speed in bytes per second, - * .dSpeed: decompression speed in bytes per second, - * .cSize : compressed size, in bytes - * .cMem : memory budget required for the compression context + * @return: 0 on success, !0 on error */ -BMK_benchOutcome_t BMK_benchFiles( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, - int cLevel, const ZSTD_compressionParameters* compressionParams, - int displayLevel); +int BMK_benchFiles( + const char* const * fileNamesTable, unsigned nbFiles, + const char* dictFileName, + int cLevel, const ZSTD_compressionParameters* compressionParams, + int displayLevel); typedef enum { @@ -105,9 +92,10 @@ typedef enum { } BMK_mode_t; typedef struct { - BMK_mode_t mode; /* 0: all, 1: compress only 2: decode only */ + BMK_mode_t mode; /* 0: both, 1: compress only 2: decode only */ unsigned nbSeconds; /* default timing is in nbSeconds */ - size_t blockSize; /* Maximum size of each block*/ + size_t chunkSizeMax; /* Maximum size of each independent chunk */ + size_t targetCBlockSize;/* Approximative size of compressed blocks */ int nbWorkers; /* multithreading */ unsigned realTime; /* real time priority */ int additionalParam; /* used by python speed benchmark */ @@ -116,7 +104,7 @@ typedef struct { int ldmHashLog; int ldmBucketSizeLog; int ldmHashRateLog; - ZSTD_paramSwitch_e literalCompressionMode; + ZSTD_ParamSwitch_e literalCompressionMode; int useRowMatchFinder; /* use row-based matchfinder if possible */ } BMK_advancedParams_t; @@ -126,33 +114,27 @@ BMK_advancedParams_t BMK_initAdvancedParams(void); /*! BMK_benchFilesAdvanced(): * Same as BMK_benchFiles(), * with more controls, provided through advancedParams_t structure */ -BMK_benchOutcome_t BMK_benchFilesAdvanced( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, - int cLevel, const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv); +int BMK_benchFilesAdvanced( + const char* const * fileNamesTable, unsigned nbFiles, + const char* dictFileName, + int startCLevel, int endCLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, const BMK_advancedParams_t* adv); /*! BMK_syntheticTest() -- called from zstdcli */ -/* Generates a sample with datagen, using compressibility argument */ -/* cLevel - compression level to benchmark, errors if invalid - * compressibility - determines compressibility of sample - * compressionParams - basic compression Parameters - * displayLevel - see benchFiles - * adv - see advanced_Params_t - * @return: - * a variant, which expresses either an error, or a valid result. - * Use BMK_isSuccessful_benchOutcome() to check if function was successful. 
- * If yes, extract the valid result with BMK_extract_benchResult(), - * it will contain : - * .cSpeed: compression speed in bytes per second, - * .dSpeed: decompression speed in bytes per second, - * .cSize : compressed size, in bytes - * .cMem : memory budget required for the compression context +/* Generates a sample with datagen, using @compressibility argument + * @cLevel - compression level to benchmark, errors if invalid + * @compressibility - determines compressibility of sample, range [0.0 - 1.0] + * if @compressibility < 0.0, uses the lorem ipsum generator + * @compressionParams - basic compression parameters + * @displayLevel - see benchFiles + * @adv - see BMK_advancedParams_t + * @return: 0 on success, !0 on error */ -BMK_benchOutcome_t BMK_syntheticTest( - int cLevel, double compressibility, - const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv); +int BMK_syntheticTest(double compressibility, + int startingCLevel, int endCLevel, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, const BMK_advancedParams_t* adv); @@ -190,8 +172,8 @@ BMK_benchOutcome_t BMK_benchMem(const void* srcBuffer, size_t srcSize, int displayLevel, const char* displayName); -/* BMK_benchMemAdvanced() : same as BMK_benchMem() - * with following additional options : +/* BMK_benchMemAdvanced() : used by Paramgrill + * same as BMK_benchMem() with following additional options : * dstBuffer - destination buffer to write compressed output in, NULL if none provided. * dstCapacity - capacity of destination buffer, give 0 if dstBuffer = NULL * adv = see advancedParams_t @@ -207,7 +189,3 @@ BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize, #endif /* BENCH_ZSTD_H_3242387 */ - -#if defined (__cplusplus) -} -#endif diff --git a/programs/datagen.c b/programs/datagen.c index 3b4f9e5c7b6..ddc690bb1b7 100644 --- a/programs/datagen.c +++ b/programs/datagen.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/datagen.h b/programs/datagen.h index b76ae2a2225..461fb716c43 100644 --- a/programs/datagen.h +++ b/programs/datagen.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,6 +14,10 @@ #include <stddef.h> /* size_t */ +#if defined (__cplusplus) +extern "C" { +#endif + void RDG_genStdout(unsigned long long size, double matchProba, double litProba, unsigned seed); void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed); /*!RDG_genBuffer @@ -27,4 +31,8 @@ void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba Same as RDG_genBuffer, but generates data into stdout +#if defined (__cplusplus) +} /* extern "C" */ +#endif + #endif diff --git a/programs/dibio.c b/programs/dibio.c index 8643bc378f3..dc629103ba2 100644 --- a/programs/dibio.c +++ b/programs/dibio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the @@ -274,21 +274,27 @@ static fileStats DiB_fileStats(const char** fileNamesTable, int nbFiles, size_t int n; memset(&fs, 0, sizeof(fs)); - // We assume that if chunking is requested, the chunk size is < SAMPLESIZE_MAX + /* We assume that if chunking is requested, the chunk size is < SAMPLESIZE_MAX */ assert( chunkSize <= SAMPLESIZE_MAX ); for (n=0; n<nbFiles; n++) { S64 const fileSize = DiB_getFileSize(fileNamesTable[n]); - if (chunkSize > 0) - { - // TODO: is there a minimum sample size? Can we have a 1-byte sample? + if (chunkSize > 0) { + /* TODO: is there a minimum sample size? Can we have a 1-byte sample? */ fs.nbSamples += (int)((fileSize + chunkSize-1) / chunkSize); fs.totalSizeToLoad += fileSize; } @@ -299,7 +305,7 @@ static fileStats DiB_fileStats(const char** fileNamesTable, int nbFiles, size_t fs.oneSampleTooLarge |= (fileSize > 2*SAMPLESIZE_MAX); /* Limit to the first SAMPLESIZE_MAX (128kB) of the file */ - DISPLAYLEVEL(3, "Sample file '%s' is too large, limiting to %d KB", + DISPLAYLEVEL(3, "Sample file '%s' is too large, limiting to %d KB\n", fileNamesTable[n], SAMPLESIZE_MAX / (1 KB)); } fs.nbSamples += 1; @@ -363,9 +369,9 @@ int DiB_trainFromFiles(const char* dictFileName, size_t maxDictSize, DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each sample are loaded \n", SAMPLESIZE_MAX); } if (fs.nbSamples < 5) { - DISPLAYLEVEL(2, "! Warning : nb of samples too low for proper processing ! \n"); - DISPLAYLEVEL(2, "! Please provide _one file per sample_. \n"); - DISPLAYLEVEL(2, "! Alternatively, split files into fixed-size blocks representative of samples, with -B# \n"); + DISPLAYLEVEL(2, "! Warning : nb of samples too low for proper processing !\n"); + DISPLAYLEVEL(2, "! Please provide _one file per sample_.\n"); + DISPLAYLEVEL(2, "! Alternatively, split file(s) into fixed-size samples, with --split=#\n"); EXM_THROW(14, "nb of samples too low"); /* we now clearly forbid this case */ } if (fs.totalSizeToLoad < (S64)maxDictSize * 8) { diff --git a/programs/dibio.h b/programs/dibio.h index 666c1e66180..a96104c36d7 100644 --- a/programs/dibio.h +++ b/programs/dibio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/fileio.c b/programs/fileio.c index 96cf602a300..78a906d533b 100644 --- a/programs/fileio.c +++ b/programs/fileio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -28,6 +28,7 @@ #include <stdio.h> /* fprintf, open, fdopen, fread, _fileno, stdin, stdout */ #include <stdlib.h> /* malloc, free */ #include <string.h> /* strcmp, strlen */ +#include <time.h> /* clock_t, to measure process time */ #include <fcntl.h> /* O_WRONLY */ #include <assert.h> #include <errno.h> /* errno */ @@ -113,13 +114,282 @@ char const* FIO_lzmaVersion(void) #define FNSPACE 30 /* Default file permissions 0666 (modulated by umask) */ +/* Temporary restricted file permissions are used when we're going to + * chmod/chown at the end of the operation. */ #if !defined(_WIN32) /* These macros aren't defined on windows.
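[Editor's note] In the DiB_fileStats() hunk above, the per-file sample count is the usual ceiling division; a tiny self-contained restatement (the helper name and the worked example are mine):

    #include <assert.h>

    /* number of chunkSize-sized samples needed to cover fileSize, rounded up */
    static int nbChunks(unsigned long long fileSize, unsigned long long chunkSize)
    {
        assert(chunkSize > 0);
        return (int)((fileSize + chunkSize - 1) / chunkSize);
    }
    /* e.g. nbChunks(100*1024, 32*1024) == 4 : three full 32 KB chunks + one 4 KB tail */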
*/ #define DEFAULT_FILE_PERMISSIONS (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH) +#define TEMPORARY_FILE_PERMISSIONS (S_IRUSR|S_IWUSR) #else #define DEFAULT_FILE_PERMISSIONS (0666) +#define TEMPORARY_FILE_PERMISSIONS (0600) #endif + +#ifndef ZSTD_NOCOMPRESS + +/* ************************************* +* Synchronous compression IO helpers +* Lightweight wrapper used by compression paths to manage buffered +* reads/writes without the async job machinery. +***************************************/ +typedef struct { + const FIO_prefs_t* prefs; + FILE* srcFile; + FILE* dstFile; + unsigned storedSkips; + U8* inBuffer; + size_t inCapacity; + U8* srcBuffer; + size_t srcBufferLoaded; + U8* outBuffer; + size_t outCapacity; +} FIO_SyncCompressIO; + +static void FIO_SyncCompressIO_init(FIO_SyncCompressIO* io, + const FIO_prefs_t* prefs, + size_t inCapacity, + size_t outCapacity); +static void FIO_SyncCompressIO_destroy(FIO_SyncCompressIO* io); +static void FIO_SyncCompressIO_setSrc(FIO_SyncCompressIO* io, FILE* file); +static void FIO_SyncCompressIO_clearSrc(FIO_SyncCompressIO* io); +static void FIO_SyncCompressIO_setDst(FIO_SyncCompressIO* io, FILE* file); +static int FIO_SyncCompressIO_closeDst(FIO_SyncCompressIO* io); +static size_t FIO_SyncCompressIO_fillBuffer(FIO_SyncCompressIO* io, size_t minToHave); +static void FIO_SyncCompressIO_consumeBytes(FIO_SyncCompressIO* io, size_t n); +static void FIO_SyncCompressIO_commitOut(FIO_SyncCompressIO* io, const void* buffer, size_t size); +static void FIO_SyncCompressIO_finish(FIO_SyncCompressIO* io); + + +static unsigned FIO_sparseWrite(FILE* file, + const void* buffer, size_t bufferSize, + const FIO_prefs_t* const prefs, + unsigned storedSkips) +{ + const size_t* const bufferT = (const size_t*)buffer; /* Buffer is supposed malloc'ed, hence aligned on size_t */ + size_t bufferSizeT = bufferSize / sizeof(size_t); + const size_t* const bufferTEnd = bufferT + bufferSizeT; + const size_t* ptrT = bufferT; + static const size_t segmentSizeT = (32 KB) / sizeof(size_t); /* check every 32 KB */ + + if (prefs->testMode) return 0; /* do not output anything in test mode */ + + if (!prefs->sparseFileSupport) { /* normal write */ + size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file); + if (sizeCheck != bufferSize) + EXM_THROW(70, "Write error : cannot write block : %s", + strerror(errno)); + return 0; + } + + /* avoid int overflow */ + if (storedSkips > 1 GB) { + if (LONG_SEEK(file, 1 GB, SEEK_CUR) != 0) + EXM_THROW(91, "1 GB skip error (sparse file support)"); + storedSkips -= 1 GB; + } + + while (ptrT < bufferTEnd) { + size_t nb0T; + + /* adjust last segment if < 32 KB */ + size_t seg0SizeT = segmentSizeT; + if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT; + bufferSizeT -= seg0SizeT; + + /* count leading zeroes */ + for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ; + storedSkips += (unsigned)(nb0T * sizeof(size_t)); + + if (nb0T != seg0SizeT) { /* not all 0s */ + size_t const nbNon0ST = seg0SizeT - nb0T; + /* skip leading zeros */ + if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) + EXM_THROW(92, "Sparse skip error ; try --no-sparse"); + storedSkips = 0; + /* write the rest */ + if (fwrite(ptrT + nb0T, sizeof(size_t), nbNon0ST, file) != nbNon0ST) + EXM_THROW(93, "Write error : cannot write block : %s", + strerror(errno)); + } + ptrT += seg0SizeT; + } + + { static size_t const maskT = sizeof(size_t)-1; + if (bufferSize & maskT) { + /* size not multiple of sizeof(size_t) : implies end of block */ + const char* const restStart = (const 
char*)bufferTEnd; + const char* restPtr = restStart; + const char* const restEnd = (const char*)buffer + bufferSize; + assert(restEnd > restStart && restEnd < restStart + sizeof(size_t)); + for ( ; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ; + storedSkips += (unsigned) (restPtr - restStart); + if (restPtr != restEnd) { + /* not all remaining bytes are 0 */ + size_t const restSize = (size_t)(restEnd - restPtr); + if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) + EXM_THROW(92, "Sparse skip error ; try --no-sparse"); + if (fwrite(restPtr, 1, restSize, file) != restSize) + EXM_THROW(95, "Write error : cannot write end of decoded block : %s", + strerror(errno)); + storedSkips = 0; + } } } + + return storedSkips; +} + +static void FIO_sparseWriteEnd(const FIO_prefs_t* const prefs, FILE* file, unsigned storedSkips) +{ + if (file == NULL) return; + if (prefs->testMode) { + assert(storedSkips == 0); + return; + } + if (storedSkips>0) { + assert(prefs->sparseFileSupport > 0); /* storedSkips>0 implies sparse support is enabled */ + if (LONG_SEEK(file, storedSkips-1, SEEK_CUR) != 0) + EXM_THROW(69, "Final skip error (sparse file support)"); + /* last zero must be explicitly written, + * so that skipped ones get implicitly translated as zero by FS */ + { const char lastZeroByte[1] = { 0 }; + if (fwrite(lastZeroByte, 1, 1, file) != 1) + EXM_THROW(69, "Write error : cannot write last zero : %s", strerror(errno)); + } + } +} + +static void FIO_SyncCompressIO_init(FIO_SyncCompressIO* io, + const FIO_prefs_t* prefs, + size_t inCapacity, + size_t outCapacity) +{ + memset(io, 0, sizeof(*io)); + io->prefs = prefs; + io->inCapacity = inCapacity; + io->outCapacity = outCapacity; + io->inBuffer = (U8*)malloc(inCapacity); + if (!io->inBuffer) + EXM_THROW(101, "Allocation error : not enough memory"); + io->outBuffer = (U8*)malloc(outCapacity); + if (!io->outBuffer) { + free(io->inBuffer); + io->inBuffer = NULL; + EXM_THROW(101, "Allocation error : not enough memory"); + } + io->srcBuffer = io->inBuffer; + io->srcBufferLoaded = 0; +} + +static void FIO_SyncCompressIO_destroy(FIO_SyncCompressIO* io) +{ + if (!io) return; + free(io->inBuffer); + free(io->outBuffer); + io->inBuffer = NULL; + io->outBuffer = NULL; + io->srcBuffer = NULL; + io->srcBufferLoaded = 0; + io->srcFile = NULL; + io->dstFile = NULL; + io->storedSkips = 0; +} + +static void FIO_SyncCompressIO_setSrc(FIO_SyncCompressIO* io, FILE* file) +{ + io->srcFile = file; + io->srcBuffer = io->inBuffer; + io->srcBufferLoaded = 0; +} + +static void FIO_SyncCompressIO_clearSrc(FIO_SyncCompressIO* io) +{ + io->srcFile = NULL; + io->srcBuffer = io->inBuffer; + io->srcBufferLoaded = 0; +} + +static void FIO_SyncCompressIO_setDst(FIO_SyncCompressIO* io, FILE* file) +{ + io->dstFile = file; + io->storedSkips = 0; +} + +static int FIO_SyncCompressIO_closeDst(FIO_SyncCompressIO* io) +{ + int result = 0; + if (io->dstFile != NULL) { + FIO_SyncCompressIO_finish(io); + result = fclose(io->dstFile); + io->dstFile = NULL; + } + return result; +} + +static size_t FIO_SyncCompressIO_fillBuffer(FIO_SyncCompressIO* io, size_t minToHave) +{ + size_t added = 0; + if (io->srcFile == NULL) + return 0; + + if (minToHave > io->inCapacity) + minToHave = io->inCapacity; + + if (io->srcBufferLoaded >= minToHave) + return 0; + + if (io->srcBuffer != io->inBuffer) { + if (io->srcBufferLoaded > 0) + memmove(io->inBuffer, io->srcBuffer, io->srcBufferLoaded); + io->srcBuffer = io->inBuffer; + } + + while (io->srcBufferLoaded < minToHave) { + size_t const toRead = io->inCapacity - 
io->srcBufferLoaded; + size_t const readBytes = fread(io->inBuffer + io->srcBufferLoaded, 1, toRead, io->srcFile); + if (readBytes == 0) { + if (ferror(io->srcFile)) + EXM_THROW(37, "Read error"); + break; /* EOF */ + } + io->srcBufferLoaded += readBytes; + added += readBytes; + if (readBytes < toRead) + break; + } + + return added; +} + +static void FIO_SyncCompressIO_consumeBytes(FIO_SyncCompressIO* io, size_t n) +{ + assert(n <= io->srcBufferLoaded); + io->srcBuffer += n; + io->srcBufferLoaded -= n; + if (io->srcBufferLoaded == 0) + io->srcBuffer = io->inBuffer; +} + +static void FIO_SyncCompressIO_commitOut(FIO_SyncCompressIO* io, const void* buffer, size_t size) +{ + if (size == 0) + return; + if (io->dstFile == NULL) { + assert(io->prefs->testMode); + return; + } + io->storedSkips = FIO_sparseWrite(io->dstFile, buffer, size, io->prefs, io->storedSkips); +} + +static void FIO_SyncCompressIO_finish(FIO_SyncCompressIO* io) +{ + if (io->dstFile == NULL) + return; + FIO_sparseWriteEnd(io->prefs, io->dstFile, io->storedSkips); + io->storedSkips = 0; +} + +#endif /* ZSTD_NOCOMPRESS */ + /*-************************************ * Signal (Ctrl-C trapping) **************************************/ @@ -249,6 +519,18 @@ struct FIO_ctx_s { size_t totalBytesOutput; }; +static int FIO_shouldDisplayFileSummary(FIO_ctx_t const* fCtx) +{ + return fCtx->nbFilesTotal <= 1 || g_display_prefs.displayLevel >= 3; +} + +static int FIO_shouldDisplayMultipleFileSummary(FIO_ctx_t const* fCtx) +{ + int const shouldDisplay = (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1); + assert(shouldDisplay || FIO_shouldDisplayFileSummary(fCtx) || fCtx->nbFilesProcessed == 0); + return shouldDisplay; +} + /*-************************************* * Parameters: Initialization @@ -271,7 +553,7 @@ FIO_prefs_t* FIO_createPreferences(void) ret->removeSrcFile = 0; ret->memLimit = 0; ret->nbWorkers = 1; - ret->blockSize = 0; + ret->jobSize = 0; ret->overlapLog = FIO_OVERLAP_LOG_NOTSET; ret->adaptiveMode = 0; ret->rsyncable = 0; @@ -345,7 +627,7 @@ void FIO_setDictIDFlag(FIO_prefs_t* const prefs, int dictIDFlag) { prefs->dictID void FIO_setChecksumFlag(FIO_prefs_t* const prefs, int checksumFlag) { prefs->checksumFlag = checksumFlag; } -void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag) { prefs->removeSrcFile = (flag>0); } +void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, int flag) { prefs->removeSrcFile = (flag!=0); } void FIO_setMemLimit(FIO_prefs_t* const prefs, unsigned memLimit) { prefs->memLimit = memLimit; } @@ -360,10 +642,10 @@ void FIO_setExcludeCompressedFile(FIO_prefs_t* const prefs, int excludeCompresse void FIO_setAllowBlockDevices(FIO_prefs_t* const prefs, int allowBlockDevices) { prefs->allowBlockDevices = allowBlockDevices; } -void FIO_setBlockSize(FIO_prefs_t* const prefs, int blockSize) { - if (blockSize && prefs->nbWorkers==0) +void FIO_setJobSize(FIO_prefs_t* const prefs, int jobSize) { + if (jobSize && prefs->nbWorkers==0) DISPLAYLEVEL(2, "Setting block size is useless in single-thread mode \n"); - prefs->blockSize = blockSize; + prefs->jobSize = jobSize; } void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog){ @@ -406,7 +688,7 @@ void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode) { void FIO_setLiteralCompressionMode( FIO_prefs_t* const prefs, - ZSTD_paramSwitch_e mode) { + ZSTD_ParamSwitch_e mode) { prefs->literalCompressionMode = mode; } @@ -468,6 +750,11 @@ void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value) { prefs->passThrough = (value != 0); } +void 
FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_ParamSwitch_e value) +{ + prefs->mmapDict = value; +} + /* FIO_ctx_t functions */ void FIO_setHasStdoutOutput(FIO_ctx_t* const fCtx, int value) { @@ -505,7 +792,7 @@ static int FIO_removeFile(const char* path) DISPLAYLEVEL(2, "zstd: Refusing to remove non-regular file %s\n", path); return 0; } -#if defined(_WIN32) || defined(WIN32) +#if defined(_WIN32) /* windows doesn't allow remove read-only files, * so try to make it writable first */ if (!(statbuf.st_mode & _S_IWRITE)) { @@ -516,28 +803,32 @@ static int FIO_removeFile(const char* path) } /** FIO_openSrcFile() : - * condition : `srcFileName` must be non-NULL. `prefs` may be NULL. + * condition : `srcFileName` must be non-NULL. + * optional: `prefs` may be NULL. * @result : FILE* to `srcFileName`, or NULL if it fails */ -static FILE* FIO_openSrcFile(const FIO_prefs_t* const prefs, const char* srcFileName) +static FILE* FIO_openSrcFile(const FIO_prefs_t* const prefs, const char* srcFileName, stat_t* statbuf) { - stat_t statbuf; int allowBlockDevices = prefs != NULL ? prefs->allowBlockDevices : 0; assert(srcFileName != NULL); - if (!strcmp (srcFileName, stdinmark)) { + assert(statbuf != NULL); + + if (!strcmp(srcFileName, stdinmark)) { DISPLAYLEVEL(4,"Using stdin for input \n"); SET_BINARY_MODE(stdin); return stdin; } - if (!UTIL_stat(srcFileName, &statbuf)) { + if (!UTIL_stat(srcFileName, statbuf)) { DISPLAYLEVEL(1, "zstd: can't stat %s : %s -- ignored \n", srcFileName, strerror(errno)); return NULL; } - if (!UTIL_isRegularFileStat(&statbuf) - && !UTIL_isFIFOStat(&statbuf) - && !(allowBlockDevices && UTIL_isBlockDevStat(&statbuf)) + /* Accept regular files, FIFOs, and process substitution file descriptors */ + if (!UTIL_isRegularFileStat(statbuf) + && !UTIL_isFIFOStat(statbuf) + && !UTIL_isFileDescriptorPipe(srcFileName) /* Process substitution support */ + && !(allowBlockDevices && UTIL_isBlockDevStat(statbuf)) ) { DISPLAYLEVEL(1, "zstd: %s is not a regular file -- ignored \n", srcFileName); @@ -578,10 +869,6 @@ FIO_openDstFile(FIO_ctx_t* fCtx, FIO_prefs_t* const prefs, return NULL; } - if (prefs->sparseFileSupport == 1) { - prefs->sparseFileSupport = ZSTD_SPARSE_DEFAULT; - } - if (UTIL_isRegularFile(dstFileName)) { /* Check if destination file already exists */ #if !defined(_WIN32) @@ -595,7 +882,7 @@ FIO_openDstFile(FIO_ctx_t* fCtx, FIO_prefs_t* const prefs, if (!prefs->overwrite) { if (g_display_prefs.displayLevel <= 1) { /* No interaction possible */ - DISPLAY("zstd: %s already exists; not overwritten \n", + DISPLAYLEVEL(1, "zstd: %s already exists; not overwritten \n", dstFileName); return NULL; } @@ -608,6 +895,7 @@ FIO_openDstFile(FIO_ctx_t* fCtx, FIO_prefs_t* const prefs, } { + int isDstRegFile; #if defined(_WIN32) /* Windows requires opening the file as a "binary" file to avoid * mangling. This macro doesn't exist on unix. */ @@ -625,57 +913,87 @@ FIO_openDstFile(FIO_ctx_t* fCtx, FIO_prefs_t* const prefs, f = fdopen(fd, "wb"); } #endif + + /* Check regular file after opening with O_CREAT */ + isDstRegFile = UTIL_isFdRegularFile(fd); + if (prefs->sparseFileSupport == 1) { + prefs->sparseFileSupport = ZSTD_SPARSE_DEFAULT; + if (!isDstRegFile) { + prefs->sparseFileSupport = 0; + DISPLAYLEVEL(4, "Sparse File Support is disabled when output is not a file \n"); + } + } + if (f == NULL) { - DISPLAYLEVEL(1, "zstd: %s: %s\n", dstFileName, strerror(errno)); - } - /* An increased buffer size can provide a significant performance boost on some platforms. 
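[Editor's note] The isDstRegFile check added above gates sparse-file output on the destination actually being a regular file (sparse seeks make no sense on pipes or devices). The patch relies on UTIL_isFdRegularFile() for the real test; a hedged POSIX-only sketch of what such a check boils down to, with a name of my own:

    #include <sys/stat.h>

    static int fdIsRegularFile(int fd)
    {
        struct stat sb;
        return (fstat(fd, &sb) == 0) && S_ISREG(sb.st_mode);  /* pipes, devices => 0 */
    }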
- * Note that providing a NULL buf with a size that's not 0 is not defined in ANSI C, but is defined - * in an extension. There are three possibilities here - - * 1. Libc supports the extended version and everything is good. - * 2. Libc ignores the size when buf is NULL, in which case everything will continue as if we didn't - * call `setvbuf`. - * 3. We fail the call and execution continues but a warning message might be shown. - * In all cases due execution continues. For now, I believe that this is a more cost-effective - * solution than managing the buffers allocations ourselves (will require an API change). */ - if(setvbuf(f, NULL, _IOFBF, 1 MB)) - DISPLAYLEVEL(2, "Warning: setvbuf failed for %s\n", dstFileName); + if (UTIL_isFileDescriptorPipe(dstFileName)) { + DISPLAYLEVEL(1, "zstd: error: no output specified (use -o or -c). \n"); + } else { + DISPLAYLEVEL(1, "zstd: %s: %s\n", dstFileName, strerror(errno)); + } + } else { + /* An increased buffer size can provide a significant performance + * boost on some platforms. Note that providing a NULL buf with a + * size that's not 0 is not defined in ANSI C, but is defined in an + * extension. There are three possibilities here: + * 1. Libc supports the extended version and everything is good. + * 2. Libc ignores the size when buf is NULL, in which case + * everything will continue as if we didn't call `setvbuf()`. + * 3. We fail the call and execution continues but a warning + * message might be shown. + * In all cases, execution continues. For now, I believe that + * this is a more cost-effective solution than managing the buffer + * allocations ourselves (will require an API change). + */ + if (setvbuf(f, NULL, _IOFBF, 1 MB)) { + DISPLAYLEVEL(2, "Warning: setvbuf failed for %s\n", dstFileName); + } + } return f; } } -/*! FIO_createDictBuffer() : - * creates a buffer, pointed by `*bufferPtr`, + +/* FIO_getDictFileStat() : + */ +static void FIO_getDictFileStat(const char* fileName, stat_t* dictFileStat) { + assert(dictFileStat != NULL); + if (fileName == NULL) return; + + if (!UTIL_stat(fileName, dictFileStat)) { + EXM_THROW(31, "Stat failed on dictionary file %s: %s", fileName, strerror(errno)); + } + + if (!UTIL_isRegularFileStat(dictFileStat)) { + EXM_THROW(32, "Dictionary %s must be a regular file.", fileName); + } +} + +/* FIO_setDictBufferMalloc() : + * allocates a buffer, pointed by `dict->dictBuffer`, * loads `filename` content into it, up to DICTSIZE_MAX bytes.
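[Editor's note] On the setvbuf() remark reworded above: the call is deliberately fire-and-forget, since all three libc behaviors it lists leave the stream usable. A minimal restatement of the pattern (the 1 MB size mirrors the hunk; the helper name is mine):

    #include <stdio.h>

    static void requestBigStreamBuffer(FILE* f, const char* name)
    {
        /* NULL buffer + non-zero size relies on a common libc extension;
         * on failure the stream simply keeps its default buffering */
        if (setvbuf(f, NULL, _IOFBF, 1 << 20) != 0)
            fprintf(stderr, "Warning: setvbuf failed for %s\n", name);
    }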
* @return : loaded size * if fileName==NULL, returns 0 and a NULL pointer */ -static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName, FIO_prefs_t* const prefs) +static size_t FIO_setDictBufferMalloc(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) { FILE* fileHandle; U64 fileSize; - stat_t statbuf; + void** bufferPtr = &dict->dictBuffer; assert(bufferPtr != NULL); + assert(dictFileStat != NULL); *bufferPtr = NULL; if (fileName == NULL) return 0; DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); - if (!UTIL_stat(fileName, &statbuf)) { - EXM_THROW(31, "Stat failed on dictionary file %s: %s", fileName, strerror(errno)); - } - - if (!UTIL_isRegularFileStat(&statbuf)) { - EXM_THROW(32, "Dictionary %s must be a regular file.", fileName); - } - fileHandle = fopen(fileName, "rb"); if (fileHandle == NULL) { EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); } - fileSize = UTIL_getFileSizeStat(&statbuf); + fileSize = UTIL_getFileSizeStat(dictFileStat); { size_t const dictSizeMax = prefs->patchFromMode ? prefs->memLimit : DICTSIZE_MAX; if (fileSize > dictSizeMax) { @@ -695,6 +1013,132 @@ static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName, FIO_p return (size_t)fileSize; } +#if (PLATFORM_POSIX_VERSION > 0) +#include <sys/mman.h> +static void FIO_munmap(FIO_Dict_t* dict) +{ + munmap(dict->dictBuffer, dict->dictBufferSize); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + int fileHandle; + U64 fileSize; + void** bufferPtr = &dict->dictBuffer; + + assert(bufferPtr != NULL); + assert(dictFileStat != NULL); + *bufferPtr = NULL; + if (fileName == NULL) return 0; + + DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); + + fileHandle = open(fileName, O_RDONLY); + + if (fileHandle == -1) { + EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); + } + + fileSize = UTIL_getFileSizeStat(dictFileStat); + { + size_t const dictSizeMax = prefs->patchFromMode ? prefs->memLimit : DICTSIZE_MAX; + if (fileSize > dictSizeMax) { + EXM_THROW(34, "Dictionary file %s is too large (> %u bytes)", + fileName, (unsigned)dictSizeMax); /* avoid extreme cases */ + } + } + + *bufferPtr = mmap(NULL, (size_t)fileSize, PROT_READ, MAP_PRIVATE, fileHandle, 0); + if (*bufferPtr == MAP_FAILED) { + EXM_THROW(34, "%s", strerror(errno)) + } + + close(fileHandle); + return (size_t)fileSize; +} +#elif defined(_MSC_VER) || defined(_WIN32) +#include <windows.h> +static void FIO_munmap(FIO_Dict_t* dict) +{ + UnmapViewOfFile(dict->dictBuffer); + CloseHandle(dict->dictHandle); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + HANDLE fileHandle, mapping; + U64 fileSize; + void** bufferPtr = &dict->dictBuffer; + + assert(bufferPtr != NULL); + assert(dictFileStat != NULL); + *bufferPtr = NULL; + if (fileName == NULL) return 0; + + DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); + + fileHandle = CreateFileA(fileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_READONLY, NULL); + + if (fileHandle == INVALID_HANDLE_VALUE) { + EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); + } + + fileSize = UTIL_getFileSizeStat(dictFileStat); + { + size_t const dictSizeMax = prefs->patchFromMode ?
prefs->memLimit : DICTSIZE_MAX; + if (fileSize > dictSizeMax) { + EXM_THROW(34, "Dictionary file %s is too large (> %u bytes)", + fileName, (unsigned)dictSizeMax); /* avoid extreme cases */ + } + } + + mapping = CreateFileMapping(fileHandle, NULL, PAGE_READONLY, 0, 0, NULL); + if (mapping == NULL) { + EXM_THROW(35, "Couldn't map dictionary %s: %s", fileName, strerror(errno)); + } + + *bufferPtr = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, (DWORD)fileSize); /* we can only cast to DWORD here because dictSize <= 2GB */ + if (*bufferPtr==NULL) EXM_THROW(36, "%s", strerror(errno)); + + dict->dictHandle = fileHandle; + return (size_t)fileSize; +} +#else +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + return FIO_setDictBufferMalloc(dict, fileName, prefs, dictFileStat); +} +static void FIO_munmap(FIO_Dict_t* dict) { + free(dict->dictBuffer); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +#endif + +static void FIO_freeDict(FIO_Dict_t* dict) { + if (dict->dictBufferType == FIO_mallocDict) { + free(dict->dictBuffer); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; + } else if (dict->dictBufferType == FIO_mmapDict) { + FIO_munmap(dict); + } else { + assert(0); /* Should not reach this case */ + } +} + +static void FIO_initDict(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat, FIO_dictBufferType_t dictBufferType) { + dict->dictBufferType = dictBufferType; + if (dict->dictBufferType == FIO_mallocDict) { + dict->dictBufferSize = FIO_setDictBufferMalloc(dict, fileName, prefs, dictFileStat); + } else if (dict->dictBufferType == FIO_mmapDict) { + dict->dictBufferSize = FIO_setDictBufferMMap(dict, fileName, prefs, dictFileStat); + } else { + assert(0); /* Should not reach this case */ + } +} /* FIO_checkFilenameCollisions() : @@ -706,7 +1150,7 @@ int FIO_checkFilenameCollisions(const char** filenameTable, unsigned nbFiles) { filenameTableSorted = (const char**) malloc(sizeof(char*) * nbFiles); if (!filenameTableSorted) { - DISPLAY("Unable to malloc new str array, not checking for name collisions\n"); + DISPLAYLEVEL(1, "Allocation error during filename collision checking \n"); return 1; } @@ -723,7 +1167,7 @@ int FIO_checkFilenameCollisions(const char** filenameTable, unsigned nbFiles) { prevElem = filenameTableSorted[0]; for (u = 1; u < nbFiles; ++u) { if (strcmp(prevElem, filenameTableSorted[u]) == 0) { - DISPLAY("WARNING: Two files have same filename: %s\n", prevElem); + DISPLAYLEVEL(2, "WARNING: Two files have same filename: %s\n", prevElem); } prevElem = filenameTableSorted[u]; } @@ -806,45 +1250,89 @@ static void FIO_adjustMemLimitForPatchFromMode(FIO_prefs_t* const prefs, FIO_setMemLimit(prefs, (unsigned)maxSize); } -/* FIO_removeMultiFilesWarning() : +/* FIO_multiFilesConcatWarning() : + * This function handles logic when processing multiple files with -o or -c, displaying the appropriate warnings/prompts. * Returns 1 if the console should abort, 0 if console should proceed. - * This function handles logic when processing multiple files with -o, displaying the appropriate warnings/prompts. * - * If -f is specified, or there is just 1 file, zstd will always proceed as usual. - * If --rm is specified, there will be a prompt asking for user confirmation. - * If -f is specified with --rm, zstd will proceed as usual - * If -q is specified with --rm, zstd will abort pre-emptively - * If neither flag is specified, zstd will prompt the user for confirmation to proceed. 
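[Editor's note] The replacement function below is written as a chain of early returns; a rough, non-authoritative condensation of its decision order (the hard-fail EXM_THROW cases and the prompt wording are elided, and all names here are mine):

    /* returns 1 to abort, 0 to proceed; mirrors the order of checks below */
    static int concatPolicy(int toStdout, int testMode, int nbFiles,
                            const char* outFileName, int* removeSrcFile,
                            int overwrite, int quiet, int userSaysYes)
    {
        if ((toStdout || testMode) && *removeSrcFile) return 1; /* hard fail in the real code */
        if (testMode) return 0;
        if (nbFiles == 1 || outFileName == NULL) return 0;
        *removeSrcFile = 0;          /* concatenated output: --rm gets disabled */
        if (toStdout || overwrite) return 0;
        if (quiet) return 1;         /* no prompt possible => abort */
        return userSaysYes ? 0 : 1;  /* otherwise ask the user */
    }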
- * If --rm is not specified, then zstd will print a warning to the user (which can be silenced with -q). - * However, if the output is stdout, we will always abort rather than displaying the warning prompt. + * If output is stdout or test mode is active, check that `--rm` is disabled. + * + * If there is just 1 file to process, zstd will proceed as usual. + * If each file gets processed into its own separate destination file, proceed as usual. + * + * When multiple files are processed into a single output, + * display a warning message, then disable --rm if it's set. + * + * If -f is specified or if output is stdout, just proceed. + * If output is set with -o, prompt for confirmation. */ -static int FIO_removeMultiFilesWarning(FIO_ctx_t* const fCtx, const FIO_prefs_t* const prefs, const char* outFileName, int displayLevelCutoff) +static int FIO_multiFilesConcatWarning(const FIO_ctx_t* fCtx, FIO_prefs_t* prefs, const char* outFileName, int displayLevelCutoff) { - int error = 0; - if (fCtx->nbFilesTotal > 1 && !prefs->overwrite) { - if (g_display_prefs.displayLevel <= displayLevelCutoff) { - if (prefs->removeSrcFile) { - DISPLAYLEVEL(1, "zstd: Aborting... not deleting files and processing into dst: %s\n", outFileName); - error = 1; - } - } else { - if (!strcmp(outFileName, stdoutmark)) { - DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. \n"); - } else { - DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s \n", outFileName); - } - DISPLAYLEVEL(2, "The concatenated output CANNOT regenerate the original directory tree. \n") - if (prefs->removeSrcFile) { - if (fCtx->hasStdoutOutput) { - DISPLAYLEVEL(1, "Aborting. Use -f if you really want to delete the files and output to stdout\n"); - error = 1; - } else { - error = g_display_prefs.displayLevel > displayLevelCutoff && UTIL_requireUserConfirmation("This is a destructive operation. Proceed? (y/n): ", "Aborting...", "yY", fCtx->hasStdinInput); - } - } - } + if (fCtx->hasStdoutOutput) { + if (prefs->removeSrcFile) + /* this should not happen; hard fail, to protect user's data + * note: this should rather be an assert(), but we want to be certain that user's data will not be wiped out in case it nonetheless happens */ + EXM_THROW(43, "It's not allowed to remove input files when processed output is piped to stdout. " + "This scenario is not supposed to be possible. " + "This is a programming error. File an issue for it to be fixed."); + } + if (prefs->testMode) { + if (prefs->removeSrcFile) + /* this should not happen; hard fail, to protect user's data + * note: this should rather be an assert(), but we want to be certain that user's data will not be wiped out in case it nonetheless happens */ + EXM_THROW(43, "Test mode shall not remove input files! " + "This scenario is not supposed to be possible. " + "This is a programming error. File an issue for it to be fixed."); + return 0; } - return error; + + if (fCtx->nbFilesTotal == 1) return 0; + assert(fCtx->nbFilesTotal > 1); + + if (!outFileName) return 0; + + if (fCtx->hasStdoutOutput) { + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. \n"); + } else { + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s \n", outFileName); + } + DISPLAYLEVEL(2, "The concatenated output CANNOT regenerate original file names nor directory structure.
\n") + + /* multi-input into single output : --rm is not allowed */ + if (prefs->removeSrcFile) { + DISPLAYLEVEL(2, "Since it's a destructive operation, input files will not be removed. \n"); + prefs->removeSrcFile = 0; + } + + if (fCtx->hasStdoutOutput) return 0; + if (prefs->overwrite) return 0; + + /* multiple files concatenated into single destination file using -o without -f */ + if (g_display_prefs.displayLevel <= displayLevelCutoff) { + /* quiet mode => no prompt => fail automatically */ + DISPLAYLEVEL(1, "Concatenating multiple processed inputs into a single output loses file metadata. \n"); + DISPLAYLEVEL(1, "Aborting. \n"); + return 1; + } + /* normal mode => prompt */ + return UTIL_requireUserConfirmation("Proceed? (y/n): ", "Aborting...", "yY", fCtx->hasStdinInput); +} + +static ZSTD_inBuffer setInBuffer(const void* buf, size_t s, size_t pos) +{ + ZSTD_inBuffer i; + i.src = buf; + i.size = s; + i.pos = pos; + return i; +} + +static ZSTD_outBuffer setOutBuffer(void* buf, size_t s, size_t pos) +{ + ZSTD_outBuffer o; + o.dst = buf; + o.size = s; + o.pos = pos; + return o; } #ifndef ZSTD_NOCOMPRESS @@ -853,12 +1341,11 @@ static int FIO_removeMultiFilesWarning(FIO_ctx_t* const fCtx, const FIO_prefs_t* * Compression ************************************************************************/ typedef struct { - void* dictBuffer; - size_t dictBufferSize; + FIO_Dict_t dict; const char* dictFileName; + stat_t dictFileStat; ZSTD_CStream* cctx; - WritePoolCtx_t *writeCtx; - ReadPoolCtx_t *readCtx; + FIO_SyncCompressIO io; } cRess_t; /** ZSTD_cycleLog() : @@ -884,21 +1371,25 @@ static void FIO_adjustParamsForPatchFromMode(FIO_prefs_t* const prefs, comprParams->windowLog = MAX(ZSTD_WINDOWLOG_MIN, MIN(ZSTD_WINDOWLOG_MAX, fileWindowLog)); if (fileWindowLog > ZSTD_cycleLog(cParams.chainLog, cParams.strategy)) { if (!prefs->ldmFlag) - DISPLAYLEVEL(1, "long mode automatically triggered\n"); + DISPLAYLEVEL(2, "long mode automatically triggered\n"); FIO_setLdmFlag(prefs, 1); } if (cParams.strategy >= ZSTD_btopt) { - DISPLAYLEVEL(1, "[Optimal parser notes] Consider the following to improve patch size at the cost of speed:\n"); - DISPLAYLEVEL(1, "- Use --single-thread mode in the zstd cli\n"); - DISPLAYLEVEL(1, "- Set a larger targetLength (e.g. --zstd=targetLength=4096)\n"); - DISPLAYLEVEL(1, "- Set a larger chainLog (e.g. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX); - DISPLAYLEVEL(1, "Also consider playing around with searchLog and hashLog\n"); + DISPLAYLEVEL(4, "[Optimal parser notes] Consider the following to improve patch size at the cost of speed:\n"); + DISPLAYLEVEL(4, "- Set a larger targetLength (e.g. --zstd=targetLength=4096)\n"); + DISPLAYLEVEL(4, "- Set a larger chainLog (e.g. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX); + DISPLAYLEVEL(4, "- Set a larger LDM hashLog (e.g. --zstd=ldmHashLog=%u)\n", ZSTD_LDM_HASHLOG_MAX); + DISPLAYLEVEL(4, "- Set a smaller LDM rateLog (e.g. 
--zstd=ldmHashRateLog=%u)\n", ZSTD_LDM_HASHRATELOG_MIN); + DISPLAYLEVEL(4, "Also consider playing around with searchLog and hashLog\n"); } } static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, const char* dictFileName, unsigned long long const maxSrcFileSize, int cLevel, ZSTD_compressionParameters comprParams) { + int useMMap = prefs->mmapDict == ZSTD_ps_enable; + int forceNoUseMMap = prefs->mmapDict == ZSTD_ps_disable; + FIO_dictBufferType_t dictBufferType; cRess_t ress; memset(&ress, 0, sizeof(ress)); @@ -908,19 +1399,24 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, EXM_THROW(30, "allocation error (%s): can't create ZSTD_CCtx", strerror(errno)); + FIO_getDictFileStat(dictFileName, &ress.dictFileStat); + /* need to update memLimit before calling createDictBuffer * because of memLimit check inside it */ if (prefs->patchFromMode) { + U64 const dictSize = UTIL_getFileSizeStat(&ress.dictFileStat); unsigned long long const ssSize = (unsigned long long)prefs->streamSrcSize; - FIO_adjustParamsForPatchFromMode(prefs, &comprParams, UTIL_getFileSize(dictFileName), ssSize > 0 ? ssSize : maxSrcFileSize, cLevel); + useMMap |= dictSize > prefs->memLimit; + FIO_adjustParamsForPatchFromMode(prefs, &comprParams, dictSize, ssSize > 0 ? ssSize : maxSrcFileSize, cLevel); } - ress.dictBufferSize = FIO_createDictBuffer(&ress.dictBuffer, dictFileName, prefs); /* works with dictFileName==NULL */ - ress.writeCtx = AIO_WritePool_create(prefs, ZSTD_CStreamOutSize()); - ress.readCtx = AIO_ReadPool_create(prefs, ZSTD_CStreamInSize()); + dictBufferType = (useMMap && !forceNoUseMMap) ? FIO_mmapDict : FIO_mallocDict; + FIO_initDict(&ress.dict, dictFileName, prefs, &ress.dictFileStat, dictBufferType); /* works with dictFileName==NULL */ + + FIO_SyncCompressIO_init(&ress.io, prefs, ZSTD_CStreamInSize(), ZSTD_CStreamOutSize()); /* Advanced parameters, including dictionary */ - if (dictFileName && (ress.dictBuffer==NULL)) + if (dictFileName && (ress.dict.dictBuffer==NULL)) EXM_THROW(32, "allocation error : can't create dictBuffer"); ress.dictFileName = dictFileName; @@ -961,7 +1457,7 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, #ifdef ZSTD_MULTITHREAD DISPLAYLEVEL(5,"set nb workers = %u \n", prefs->nbWorkers); CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_nbWorkers, prefs->nbWorkers) ); - CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_jobSize, prefs->blockSize) ); + CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_jobSize, prefs->jobSize) ); if (prefs->overlapLog != FIO_OVERLAP_LOG_NOTSET) { DISPLAYLEVEL(3,"set overlapLog = %u \n", prefs->overlapLog); CHECK( ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_overlapLog, prefs->overlapLog) ); @@ -970,32 +1466,31 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, #endif /* dictionary */ if (prefs->patchFromMode) { - CHECK( ZSTD_CCtx_refPrefix(ress.cctx, ress.dictBuffer, ress.dictBufferSize) ); + CHECK( ZSTD_CCtx_refPrefix(ress.cctx, ress.dict.dictBuffer, ress.dict.dictBufferSize) ); } else { - CHECK( ZSTD_CCtx_loadDictionary(ress.cctx, ress.dictBuffer, ress.dictBufferSize) ); + CHECK( ZSTD_CCtx_loadDictionary_byReference(ress.cctx, ress.dict.dictBuffer, ress.dict.dictBufferSize) ); } return ress; } -static void FIO_freeCResources(const cRess_t* const ress) +static void FIO_freeCResources(cRess_t* const ress) { - free(ress->dictBuffer); - AIO_WritePool_free(ress->writeCtx); - AIO_ReadPool_free(ress->readCtx); + FIO_freeDict(&(ress->dict)); + FIO_SyncCompressIO_destroy(&ress->io); ZSTD_freeCStream(ress->cctx); /* never 
fails */ } #ifdef ZSTD_GZCOMPRESS static unsigned long long -FIO_compressGzFrame(const cRess_t* ress, /* buffers & handlers are used, but not changed */ +FIO_compressGzFrame(cRess_t* ress, const char* srcFileName, U64 const srcFileSize, int compressionLevel, U64* readsize) { + FIO_SyncCompressIO* const syncIO = &ress->io; unsigned long long inFileSize = 0, outFileSize = 0; z_stream strm; - IOJob_t *writeJob = NULL; if (compressionLevel > Z_BEST_COMPRESSION) compressionLevel = Z_BEST_COMPRESSION; @@ -1006,62 +1501,62 @@ FIO_compressGzFrame(const cRess_t* ress, /* buffers & handlers are used, but no { int const ret = deflateInit2(&strm, compressionLevel, Z_DEFLATED, 15 /* maxWindowLogSize */ + 16 /* gzip only */, - 8, Z_DEFAULT_STRATEGY); /* see http://www.zlib.net/manual.html */ + 8, Z_DEFAULT_STRATEGY); /* see https://www.zlib.net/manual.html */ if (ret != Z_OK) { EXM_THROW(71, "zstd: %s: deflateInit2 error %d \n", srcFileName, ret); } } - writeJob = AIO_WritePool_acquireJob(ress->writeCtx); strm.next_in = 0; strm.avail_in = 0; - strm.next_out = (Bytef*)writeJob->buffer; - strm.avail_out = (uInt)writeJob->bufferSize; + strm.next_out = (Bytef*)syncIO->outBuffer; + strm.avail_out = (uInt)syncIO->outCapacity; while (1) { int ret; if (strm.avail_in == 0) { - AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_CStreamInSize()); - if (ress->readCtx->srcBufferLoaded == 0) break; - inFileSize += ress->readCtx->srcBufferLoaded; - strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; - strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; + size_t const added = FIO_SyncCompressIO_fillBuffer(syncIO, ZSTD_CStreamInSize()); + if (syncIO->srcBufferLoaded == 0) break; + inFileSize += added; + *readsize += added; + strm.next_in = (z_const unsigned char*)syncIO->srcBuffer; + strm.avail_in = (uInt)syncIO->srcBufferLoaded; } { size_t const availBefore = strm.avail_in; ret = deflate(&strm, Z_NO_FLUSH); - AIO_ReadPool_consumeBytes(ress->readCtx, availBefore - strm.avail_in); + FIO_SyncCompressIO_consumeBytes(syncIO, availBefore - strm.avail_in); } if (ret != Z_OK) EXM_THROW(72, "zstd: %s: deflate error %d \n", srcFileName, ret); - { size_t const cSize = writeJob->bufferSize - strm.avail_out; + { size_t const cSize = (size_t)((uInt)syncIO->outCapacity - strm.avail_out); if (cSize) { - writeJob->usedBufferSize = cSize; - AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, cSize); outFileSize += cSize; - strm.next_out = (Bytef*)writeJob->buffer; - strm.avail_out = (uInt)writeJob->bufferSize; + strm.next_out = (Bytef*)syncIO->outBuffer; + strm.avail_out = (uInt)syncIO->outCapacity; } } if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%% ", - (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + DISPLAYUPDATE_PROGRESS( + "\rRead : %u MB ==> %.2f%% ", + (unsigned)(inFileSize>>20), + (double)outFileSize/(double)inFileSize*100) } else { - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%% ", - (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); - } } + DISPLAYUPDATE_PROGRESS( + "\rRead : %u / %u MB ==> %.2f%% ", + (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), + (double)outFileSize/(double)inFileSize*100); + } } while (1) { int const ret = deflate(&strm, Z_FINISH); - { size_t const cSize = writeJob->bufferSize - strm.avail_out; + { size_t const cSize = (size_t)((uInt)syncIO->outCapacity - strm.avail_out); if (cSize) { - writeJob->usedBufferSize = cSize; 
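[Editor's note] Each codec loop in this patch (gzip here, lzma and lz4 below, zstd further down) repeats the same commit step: measure what the encoder left in outBuffer, flush it through the sparse writer, then reset the output window. A hedged factoring of that step, reusing the patch's own FIO_SyncCompressIO helpers (only the wrapper name is mine):

    /* flush whatever the encoder produced; caller resets avail_out to the return value */
    static size_t commitProduced(FIO_SyncCompressIO* io, size_t availOut)
    {
        size_t const produced = io->outCapacity - availOut;
        if (produced)
            FIO_SyncCompressIO_commitOut(io, io->outBuffer, produced);
        return io->outCapacity;
    }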
- AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, cSize); outFileSize += cSize; - strm.next_out = (Bytef*)writeJob->buffer; - strm.avail_out = (uInt)writeJob->bufferSize; + strm.next_out = (Bytef*)syncIO->outBuffer; + strm.avail_out = (uInt)syncIO->outCapacity; } } if (ret == Z_STREAM_END) break; if (ret != Z_BUF_ERROR) @@ -1073,8 +1568,7 @@ FIO_compressGzFrame(const cRess_t* ress, /* buffers & handlers are used, but no EXM_THROW(79, "zstd: %s: deflateEnd error %d \n", srcFileName, ret); } } *readsize = inFileSize; - AIO_WritePool_releaseIoJob(writeJob); - AIO_WritePool_sparseWriteEnd(ress->writeCtx); + FIO_SyncCompressIO_finish(syncIO); return outFileSize; } #endif @@ -1086,11 +1580,11 @@ FIO_compressLzmaFrame(cRess_t* ress, const char* srcFileName, U64 const srcFileSize, int compressionLevel, U64* readsize, int plain_lzma) { + FIO_SyncCompressIO* const syncIO = &ress->io; unsigned long long inFileSize = 0, outFileSize = 0; lzma_stream strm = LZMA_STREAM_INIT; lzma_action action = LZMA_RUN; lzma_ret ret; - IOJob_t *writeJob = NULL; if (compressionLevel < 0) compressionLevel = 0; if (compressionLevel > 9) compressionLevel = 9; @@ -1108,54 +1602,51 @@ FIO_compressLzmaFrame(cRess_t* ress, EXM_THROW(83, "zstd: %s: lzma_easy_encoder error %d", srcFileName, ret); } - writeJob =AIO_WritePool_acquireJob(ress->writeCtx); - strm.next_out = (Bytef*)writeJob->buffer; - strm.avail_out = (uInt)writeJob->bufferSize; + strm.next_out = (BYTE*)syncIO->outBuffer; + strm.avail_out = syncIO->outCapacity; strm.next_in = 0; strm.avail_in = 0; while (1) { if (strm.avail_in == 0) { - size_t const inSize = AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_CStreamInSize()); - if (ress->readCtx->srcBufferLoaded == 0) action = LZMA_FINISH; - inFileSize += inSize; - strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; - strm.avail_in = ress->readCtx->srcBufferLoaded; + size_t const added = FIO_SyncCompressIO_fillBuffer(syncIO, ZSTD_CStreamInSize()); + if (syncIO->srcBufferLoaded == 0) action = LZMA_FINISH; + inFileSize += added; + *readsize += added; + strm.next_in = (BYTE const*)syncIO->srcBuffer; + strm.avail_in = syncIO->srcBufferLoaded; } { size_t const availBefore = strm.avail_in; ret = lzma_code(&strm, action); - AIO_ReadPool_consumeBytes(ress->readCtx, availBefore - strm.avail_in); + FIO_SyncCompressIO_consumeBytes(syncIO, availBefore - strm.avail_in); } - if (ret != LZMA_OK && ret != LZMA_STREAM_END) EXM_THROW(84, "zstd: %s: lzma_code encoding error %d", srcFileName, ret); - { size_t const compBytes = writeJob->bufferSize - strm.avail_out; + { size_t const compBytes = syncIO->outCapacity - strm.avail_out; if (compBytes) { - writeJob->usedBufferSize = compBytes; - AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, compBytes); outFileSize += compBytes; - strm.next_out = (Bytef*)writeJob->buffer; - strm.avail_out = writeJob->bufferSize; + strm.next_out = (BYTE*)syncIO->outBuffer; + strm.avail_out = syncIO->outCapacity; } } if (srcFileSize == UTIL_FILESIZE_UNKNOWN) - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + (double)outFileSize/(double)inFileSize*100) else - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + 
(double)outFileSize/(double)inFileSize*100); if (ret == LZMA_STREAM_END) break; } lzma_end(&strm); *readsize = inFileSize; - AIO_WritePool_releaseIoJob(writeJob); - AIO_WritePool_sparseWriteEnd(ress->writeCtx); + FIO_SyncCompressIO_finish(syncIO); return outFileSize; } @@ -1176,21 +1667,20 @@ FIO_compressLz4Frame(cRess_t* ress, int compressionLevel, int checksumFlag, U64* readsize) { + FIO_SyncCompressIO* const syncIO = &ress->io; const size_t blockSize = FIO_LZ4_GetBlockSize_FromBlockId(LZ4F_max64KB); unsigned long long inFileSize = 0, outFileSize = 0; LZ4F_preferences_t prefs; LZ4F_compressionContext_t ctx; - IOJob_t* writeJob = AIO_WritePool_acquireJob(ress->writeCtx); - LZ4F_errorCode_t const errorCode = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION); if (LZ4F_isError(errorCode)) EXM_THROW(31, "zstd: failed to create lz4 compression context"); memset(&prefs, 0, sizeof(prefs)); - assert(blockSize <= ress->readCtx->base.jobBufferSize); + assert(blockSize <= syncIO->inCapacity); /* autoflush off to mitigate a bug in lz4<=1.9.3 for compression level 12 */ prefs.autoFlush = 0; @@ -1201,78 +1691,74 @@ FIO_compressLz4Frame(cRess_t* ress, #if LZ4_VERSION_NUMBER >= 10600 prefs.frameInfo.contentSize = (srcFileSize==UTIL_FILESIZE_UNKNOWN) ? 0 : srcFileSize; #endif - assert(LZ4F_compressBound(blockSize, &prefs) <= writeJob->bufferSize); + assert(LZ4F_compressBound(blockSize, &prefs) <= syncIO->outCapacity); { - size_t headerSize = LZ4F_compressBegin(ctx, writeJob->buffer, writeJob->bufferSize, &prefs); + size_t headerSize = LZ4F_compressBegin(ctx, syncIO->outBuffer, syncIO->outCapacity, &prefs); if (LZ4F_isError(headerSize)) EXM_THROW(33, "File header generation failed : %s", LZ4F_getErrorName(headerSize)); - writeJob->usedBufferSize = headerSize; - AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, headerSize); outFileSize += headerSize; - /* Read first block */ - inFileSize += AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); + { + size_t const added = FIO_SyncCompressIO_fillBuffer(syncIO, blockSize); + inFileSize += added; + *readsize += added; + } - /* Main Loop */ - while (ress->readCtx->srcBufferLoaded) { - size_t inSize = MIN(blockSize, ress->readCtx->srcBufferLoaded); - size_t const outSize = LZ4F_compressUpdate(ctx, writeJob->buffer, writeJob->bufferSize, - ress->readCtx->srcBuffer, inSize, NULL); + while (syncIO->srcBufferLoaded) { + size_t const inSize = MIN(blockSize, syncIO->srcBufferLoaded); + size_t const outSize = LZ4F_compressUpdate(ctx, syncIO->outBuffer, syncIO->outCapacity, + syncIO->srcBuffer, inSize, NULL); if (LZ4F_isError(outSize)) EXM_THROW(35, "zstd: %s: lz4 compression failed : %s", srcFileName, LZ4F_getErrorName(outSize)); outFileSize += outSize; if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + (double)outFileSize/(double)inFileSize*100) } else { - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + (double)outFileSize/(double)inFileSize*100); } - /* Write Block */ - writeJob->usedBufferSize = outSize; - AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, outSize); - /* Read next block */ - AIO_ReadPool_consumeBytes(ress->readCtx, 
inSize); - inFileSize += AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); + FIO_SyncCompressIO_consumeBytes(syncIO, inSize); + { + size_t const added = FIO_SyncCompressIO_fillBuffer(syncIO, blockSize); + inFileSize += added; + *readsize += added; + } } - /* End of Stream mark */ - headerSize = LZ4F_compressEnd(ctx, writeJob->buffer, writeJob->bufferSize, NULL); + headerSize = LZ4F_compressEnd(ctx, syncIO->outBuffer, syncIO->outCapacity, NULL); if (LZ4F_isError(headerSize)) EXM_THROW(38, "zstd: %s: lz4 end of file generation failed : %s", srcFileName, LZ4F_getErrorName(headerSize)); - writeJob->usedBufferSize = headerSize; - AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, headerSize); outFileSize += headerSize; } - *readsize = inFileSize; LZ4F_freeCompressionContext(ctx); - AIO_WritePool_releaseIoJob(writeJob); - AIO_WritePool_sparseWriteEnd(ress->writeCtx); + FIO_SyncCompressIO_finish(syncIO); return outFileSize; } #endif - static unsigned long long FIO_compressZstdFrame(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, - const cRess_t* ressPtr, + cRess_t* ress, const char* srcFileName, U64 fileSize, int compressionLevel, U64* readsize) { - cRess_t const ress = *ressPtr; - IOJob_t *writeJob = AIO_WritePool_acquireJob(ressPtr->writeCtx); + FIO_SyncCompressIO* const syncIO = &ress->io; U64 compressedfilesize = 0; ZSTD_EndDirective directive = ZSTD_e_continue; @@ -1287,6 +1773,9 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, unsigned inputPresented = 0; unsigned inputBlocked = 0; unsigned lastJobID = 0; + UTIL_time_t lastAdaptTime = UTIL_getTime(); + U64 const adaptEveryMicro = REFRESH_RATE; + UTIL_HumanReadableSize_t const file_hrs = UTIL_makeHumanReadableSize(fileSize); DISPLAYLEVEL(6, "compression using zstd format \n"); @@ -1294,17 +1783,16 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, /* init */ if (fileSize != UTIL_FILESIZE_UNKNOWN) { pledgedSrcSize = fileSize; - CHECK(ZSTD_CCtx_setPledgedSrcSize(ress.cctx, fileSize)); + CHECK(ZSTD_CCtx_setPledgedSrcSize(ress->cctx, fileSize)); } else if (prefs->streamSrcSize > 0) { /* unknown source size; use the declared stream size */ pledgedSrcSize = prefs->streamSrcSize; - CHECK( ZSTD_CCtx_setPledgedSrcSize(ress.cctx, prefs->streamSrcSize) ); + CHECK( ZSTD_CCtx_setPledgedSrcSize(ress->cctx, prefs->streamSrcSize) ); } - { - int windowLog; + { int windowLog; UTIL_HumanReadableSize_t windowSize; - CHECK(ZSTD_CCtx_getParameter(ress.cctx, ZSTD_c_windowLog, &windowLog)); + CHECK(ZSTD_CCtx_getParameter(ress->cctx, ZSTD_c_windowLog, &windowLog)); if (windowLog == 0) { if (prefs->ldmFlag) { /* If long mode is set without a window size libzstd will set this size internally */ @@ -1317,18 +1805,17 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, windowSize = UTIL_makeHumanReadableSize(MAX(1ULL, MIN(1ULL << windowLog, pledgedSrcSize))); DISPLAYLEVEL(4, "Decompression will require %.*f%s of memory\n", windowSize.precision, windowSize.value, windowSize.suffix); } - (void)srcFileName; /* Main compression loop */ do { size_t stillToFlush; /* Fill input Buffer */ - size_t const inSize = AIO_ReadPool_fillBuffer(ress.readCtx, ZSTD_CStreamInSize()); - ZSTD_inBuffer inBuff = { ress.readCtx->srcBuffer, ress.readCtx->srcBufferLoaded, 0 }; + size_t const inSize = FIO_SyncCompressIO_fillBuffer(syncIO, ZSTD_CStreamInSize()); + ZSTD_inBuffer inBuff = setInBuffer( syncIO->srcBuffer, syncIO->srcBufferLoaded, 0 ); DISPLAYLEVEL(6, "fread %u bytes from source \n", (unsigned)inSize); *readsize += inSize; - if 
((ress.readCtx->srcBufferLoaded == 0) || (*readsize == fileSize)) + if ((syncIO->srcBufferLoaded == 0) || (*readsize == fileSize)) directive = ZSTD_e_end; stillToFlush = 1; @@ -1336,10 +1823,10 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, || (directive == ZSTD_e_end && stillToFlush != 0) ) { size_t const oldIPos = inBuff.pos; - ZSTD_outBuffer outBuff= { writeJob->buffer, writeJob->bufferSize, 0 }; - size_t const toFlushNow = ZSTD_toFlushNow(ress.cctx); - CHECK_V(stillToFlush, ZSTD_compressStream2(ress.cctx, &outBuff, &inBuff, directive)); - AIO_ReadPool_consumeBytes(ress.readCtx, inBuff.pos - oldIPos); + ZSTD_outBuffer outBuff = setOutBuffer( syncIO->outBuffer, syncIO->outCapacity, 0 ); + size_t const toFlushNow = ZSTD_toFlushNow(ress->cctx); + CHECK_V(stillToFlush, ZSTD_compressStream2(ress->cctx, &outBuff, &inBuff, directive)); + FIO_SyncCompressIO_consumeBytes(syncIO, inBuff.pos - oldIPos); /* count stats */ inputPresented++; @@ -1350,136 +1837,141 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, DISPLAYLEVEL(6, "ZSTD_compress_generic(end:%u) => input pos(%u)<=(%u)size ; output generated %u bytes \n", (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos); if (outBuff.pos) { - writeJob->usedBufferSize = outBuff.pos; - AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + FIO_SyncCompressIO_commitOut(syncIO, syncIO->outBuffer, outBuff.pos); compressedfilesize += outBuff.pos; } - /* display notification; and adapt compression level */ - if (READY_FOR_UPDATE()) { - ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress.cctx); + /* adaptive mode : statistics measurement and speed correction */ + if (prefs->adaptiveMode && UTIL_clockSpanMicro(lastAdaptTime) > adaptEveryMicro) { + ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress->cctx); + + lastAdaptTime = UTIL_getTime(); + + /* check output speed */ + if (zfp.currentJobID > 1) { /* only possible if nbWorkers >= 1 */ + + unsigned long long newlyProduced = zfp.produced - previous_zfp_update.produced; + unsigned long long newlyFlushed = zfp.flushed - previous_zfp_update.flushed; + assert(zfp.produced >= previous_zfp_update.produced); + assert(prefs->nbWorkers >= 1); + + /* test if compression is blocked + * either because output is slow and all buffers are full + * or because input is slow and no job can start while waiting for at least one buffer to be filled. 
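[Editor's note] The adaptive-mode comment starting here boils down to a two-way feedback on the compression level; a deliberately simplified restatement (the clamping mirrors the hunk, the function and flag names are mine):

    /* higher level == slower but stronger: steer the level toward the bottleneck */
    static int adaptLevel(int level, int downstreamStalled, int inputWaiting,
                          int minLevel, int maxLevel)
    {
        if (downstreamStalled) level++;  /* output can't keep up: compress harder */
        else if (inputWaiting) level--;  /* compression can't keep up: compress lighter */
        if (level > maxLevel) level = maxLevel;
        if (level < minLevel) level = minLevel;
        if (level == 0) level = 1;       /* the hunk steps over level 0 */
        return level;
    }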
+ * note : exclude starting part, since currentJobID > 1 */
+ if ( (zfp.consumed == previous_zfp_update.consumed) /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/
+ && (zfp.nbActiveWorkers == 0) /* confirmed : no compression ongoing */
+ ) {
+ DISPLAYLEVEL(6, "all buffers full : compression stopped => slow down \n")
+ speedChange = slower;
+ }
+
+ previous_zfp_update = zfp;
+
+ if ( (newlyProduced > (newlyFlushed * 9 / 8)) /* compression produces more data than output can flush (though production can be spiky, due to work unit : (N==4)*block sizes) */
+ && (flushWaiting == 0) /* flush speed was never slowed by lack of production, so it's operating at max capacity */
+ ) {
+ DISPLAYLEVEL(6, "compression faster than flush (%llu > %llu), and flushed was never slowed down by lack of production => slow down \n", newlyProduced, newlyFlushed);
+ speedChange = slower;
+ }
+ flushWaiting = 0;
+ }
+
+ /* course correct only if there is at least one new job completed */
+ if (zfp.currentJobID > lastJobID) {
+ DISPLAYLEVEL(6, "compression level adaptation check \n")
+
+ /* check input speed */
+ if (zfp.currentJobID > (unsigned)(prefs->nbWorkers+1)) { /* warm up period, to fill all workers */
+ if (inputBlocked <= 0) {
+ DISPLAYLEVEL(6, "input is never blocked => input is slower than ingestion \n");
+ speedChange = slower;
+ } else if (speedChange == noChange) {
+ unsigned long long newlyIngested = zfp.ingested - previous_zfp_correction.ingested;
+ unsigned long long newlyConsumed = zfp.consumed - previous_zfp_correction.consumed;
+ unsigned long long newlyProduced = zfp.produced - previous_zfp_correction.produced;
+ unsigned long long newlyFlushed = zfp.flushed - previous_zfp_correction.flushed;
+ previous_zfp_correction = zfp;
+ assert(inputPresented > 0);
+ DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n",
+ inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100,
+ (unsigned)newlyIngested, (unsigned)newlyConsumed,
+ (unsigned)newlyFlushed, (unsigned)newlyProduced);
+ if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers are full : compression or output too slow */
+ && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */
+ && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */
+ ) {
+ DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n",
+ newlyIngested, newlyConsumed, newlyProduced, newlyFlushed);
+ speedChange = faster;
+ }
+ }
+ inputBlocked = 0;
+ inputPresented = 0;
+ }
+
+ if (speedChange == slower) {
+ DISPLAYLEVEL(6, "slower speed, higher compression \n")
+ compressionLevel ++;
+ if (compressionLevel > ZSTD_maxCLevel()) compressionLevel = ZSTD_maxCLevel();
+ if (compressionLevel > prefs->maxAdaptLevel) compressionLevel = prefs->maxAdaptLevel;
+ compressionLevel += (compressionLevel == 0); /* skip 0 */
+ ZSTD_CCtx_setParameter(ress->cctx, ZSTD_c_compressionLevel, compressionLevel);
+ }
+ if (speedChange == faster) {
+ DISPLAYLEVEL(6, "faster speed, lighter compression \n")
+ compressionLevel --;
+ if (compressionLevel < prefs->minAdaptLevel) compressionLevel = prefs->minAdaptLevel;
+ compressionLevel -= (compressionLevel == 0); /* skip 0 */
+ ZSTD_CCtx_setParameter(ress->cctx, ZSTD_c_compressionLevel, compressionLevel);
+ }
+ speedChange = noChange;
+
+ lastJobID =
zfp.currentJobID; + } /* if (zfp.currentJobID > lastJobID) */ + } /* if (prefs->adaptiveMode && UTIL_clockSpanMicro(lastAdaptTime) > adaptEveryMicro) */ + + /* display notification */ + if (SHOULD_DISPLAY_PROGRESS() && READY_FOR_UPDATE()) { + ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress->cctx); double const cShare = (double)zfp.produced / (double)(zfp.consumed + !zfp.consumed/*avoid div0*/) * 100; UTIL_HumanReadableSize_t const buffered_hrs = UTIL_makeHumanReadableSize(zfp.ingested - zfp.consumed); UTIL_HumanReadableSize_t const consumed_hrs = UTIL_makeHumanReadableSize(zfp.consumed); UTIL_HumanReadableSize_t const produced_hrs = UTIL_makeHumanReadableSize(zfp.produced); + DELAY_NEXT_UPDATE(); + /* display progress notifications */ + DISPLAY_PROGRESS("\r%79s\r", ""); /* Clear out the current displayed line */ if (g_display_prefs.displayLevel >= 3) { - DISPLAYUPDATE(3, "\r(L%i) Buffered:%5.*f%s - Consumed:%5.*f%s - Compressed:%5.*f%s => %.2f%% ", - compressionLevel, - buffered_hrs.precision, buffered_hrs.value, buffered_hrs.suffix, - consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix, - produced_hrs.precision, produced_hrs.value, produced_hrs.suffix, - cShare ); - } else if (g_display_prefs.displayLevel >= 2 || g_display_prefs.progressSetting == FIO_ps_always) { + /* Verbose progress update */ + DISPLAY_PROGRESS( + "(L%i) Buffered:%5.*f%s - Consumed:%5.*f%s - Compressed:%5.*f%s => %.2f%% ", + compressionLevel, + buffered_hrs.precision, buffered_hrs.value, buffered_hrs.suffix, + consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix, + produced_hrs.precision, produced_hrs.value, produced_hrs.suffix, + cShare ); + } else { /* Require level 2 or forcibly displayed progress counter for summarized updates */ - DISPLAYLEVEL(1, "\r%79s\r", ""); /* Clear out the current displayed line */ if (fCtx->nbFilesTotal > 1) { size_t srcFileNameSize = strlen(srcFileName); /* Ensure that the string we print is roughly the same size each time */ if (srcFileNameSize > 18) { const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; - DISPLAYLEVEL(1, "Compress: %u/%u files. Current: ...%s ", + DISPLAY_PROGRESS("Compress: %u/%u files. Current: ...%s ", fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName); } else { - DISPLAYLEVEL(1, "Compress: %u/%u files. Current: %*s ", + DISPLAY_PROGRESS("Compress: %u/%u files. 
Current: %*s ", fCtx->currFileIdx+1, fCtx->nbFilesTotal, (int)(18-srcFileNameSize), srcFileName); } } - DISPLAYLEVEL(1, "Read:%6.*f%4s ", consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix); + DISPLAY_PROGRESS("Read:%6.*f%4s ", consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix); if (fileSize != UTIL_FILESIZE_UNKNOWN) - DISPLAYLEVEL(2, "/%6.*f%4s", file_hrs.precision, file_hrs.value, file_hrs.suffix); - DISPLAYLEVEL(1, " ==> %2.f%%", cShare); - DELAY_NEXT_UPDATE(); + DISPLAY_PROGRESS("/%6.*f%4s", file_hrs.precision, file_hrs.value, file_hrs.suffix); + DISPLAY_PROGRESS(" ==> %2.f%%", cShare); } - - /* adaptive mode : statistics measurement and speed correction */ - if (prefs->adaptiveMode) { - - /* check output speed */ - if (zfp.currentJobID > 1) { /* only possible if nbWorkers >= 1 */ - - unsigned long long newlyProduced = zfp.produced - previous_zfp_update.produced; - unsigned long long newlyFlushed = zfp.flushed - previous_zfp_update.flushed; - assert(zfp.produced >= previous_zfp_update.produced); - assert(prefs->nbWorkers >= 1); - - /* test if compression is blocked - * either because output is slow and all buffers are full - * or because input is slow and no job can start while waiting for at least one buffer to be filled. - * note : exclude starting part, since currentJobID > 1 */ - if ( (zfp.consumed == previous_zfp_update.consumed) /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/ - && (zfp.nbActiveWorkers == 0) /* confirmed : no compression ongoing */ - ) { - DISPLAYLEVEL(6, "all buffers full : compression stopped => slow down \n") - speedChange = slower; - } - - previous_zfp_update = zfp; - - if ( (newlyProduced > (newlyFlushed * 9 / 8)) /* compression produces more data than output can flush (though production can be spiky, due to work unit : (N==4)*block sizes) */ - && (flushWaiting == 0) /* flush speed was never slowed by lack of production, so it's operating at max capacity */ - ) { - DISPLAYLEVEL(6, "compression faster than flush (%llu > %llu), and flushed was never slowed down by lack of production => slow down \n", newlyProduced, newlyFlushed); - speedChange = slower; - } - flushWaiting = 0; - } - - /* course correct only if there is at least one new job completed */ - if (zfp.currentJobID > lastJobID) { - DISPLAYLEVEL(6, "compression level adaptation check \n") - - /* check input speed */ - if (zfp.currentJobID > (unsigned)(prefs->nbWorkers+1)) { /* warm up period, to fill all workers */ - if (inputBlocked <= 0) { - DISPLAYLEVEL(6, "input is never blocked => input is slower than ingestion \n"); - speedChange = slower; - } else if (speedChange == noChange) { - unsigned long long newlyIngested = zfp.ingested - previous_zfp_correction.ingested; - unsigned long long newlyConsumed = zfp.consumed - previous_zfp_correction.consumed; - unsigned long long newlyProduced = zfp.produced - previous_zfp_correction.produced; - unsigned long long newlyFlushed = zfp.flushed - previous_zfp_correction.flushed; - previous_zfp_correction = zfp; - assert(inputPresented > 0); - DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n", - inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100, - (unsigned)newlyIngested, (unsigned)newlyConsumed, - (unsigned)newlyFlushed, (unsigned)newlyProduced); - if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full 
: compression or output too slow */ - && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */ - && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */ - ) { - DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n", - newlyIngested, newlyConsumed, newlyProduced, newlyFlushed); - speedChange = faster; - } - } - inputBlocked = 0; - inputPresented = 0; - } - - if (speedChange == slower) { - DISPLAYLEVEL(6, "slower speed , higher compression \n") - compressionLevel ++; - if (compressionLevel > ZSTD_maxCLevel()) compressionLevel = ZSTD_maxCLevel(); - if (compressionLevel > prefs->maxAdaptLevel) compressionLevel = prefs->maxAdaptLevel; - compressionLevel += (compressionLevel == 0); /* skip 0 */ - ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); - } - if (speedChange == faster) { - DISPLAYLEVEL(6, "faster speed , lighter compression \n") - compressionLevel --; - if (compressionLevel < prefs->minAdaptLevel) compressionLevel = prefs->minAdaptLevel; - compressionLevel -= (compressionLevel == 0); /* skip 0 */ - ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); - } - speedChange = noChange; - - lastJobID = zfp.currentJobID; - } /* if (zfp.currentJobID > lastJobID) */ - } /* if (g_adaptiveMode) */ - } /* if (READY_FOR_UPDATE()) */ + } /* if (SHOULD_DISPLAY_PROGRESS() && READY_FOR_UPDATE()) */ } /* while ((inBuff.pos != inBuff.size) */ } while (directive != ZSTD_e_end); @@ -1488,8 +1980,7 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, (unsigned long long)*readsize, (unsigned long long)fileSize); } - AIO_WritePool_releaseIoJob(writeJob); - AIO_WritePool_sparseWriteEnd(ressPtr->writeCtx); + FIO_SyncCompressIO_finish(syncIO); return compressedfilesize; } @@ -1502,7 +1993,7 @@ FIO_compressZstdFrame(FIO_ctx_t* const fCtx, static int FIO_compressFilename_internal(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, - cRess_t ress, + cRess_t* ress, const char* dstFileName, const char* srcFileName, int compressionLevel) { @@ -1517,12 +2008,12 @@ FIO_compressFilename_internal(FIO_ctx_t* const fCtx, switch (prefs->compressionType) { default: case FIO_zstdCompression: - compressedfilesize = FIO_compressZstdFrame(fCtx, prefs, &ress, srcFileName, fileSize, compressionLevel, &readsize); + compressedfilesize = FIO_compressZstdFrame(fCtx, prefs, ress, srcFileName, fileSize, compressionLevel, &readsize); break; case FIO_gzipCompression: #ifdef ZSTD_GZCOMPRESS - compressedfilesize = FIO_compressGzFrame(&ress, srcFileName, fileSize, compressionLevel, &readsize); + compressedfilesize = FIO_compressGzFrame(ress, srcFileName, fileSize, compressionLevel, &readsize); #else (void)compressionLevel; EXM_THROW(20, "zstd: %s: file cannot be compressed as gzip (zstd compiled without ZSTD_GZCOMPRESS) -- ignored \n", @@ -1533,7 +2024,7 @@ FIO_compressFilename_internal(FIO_ctx_t* const fCtx, case FIO_xzCompression: case FIO_lzmaCompression: #ifdef ZSTD_LZMACOMPRESS - compressedfilesize = FIO_compressLzmaFrame(&ress, srcFileName, fileSize, compressionLevel, &readsize, prefs->compressionType==FIO_lzmaCompression); + compressedfilesize = FIO_compressLzmaFrame(ress, srcFileName, fileSize, compressionLevel, &readsize, prefs->compressionType==FIO_lzmaCompression); #else (void)compressionLevel; EXM_THROW(20, "zstd: %s: file cannot be compressed as xz/lzma (zstd compiled without ZSTD_LZMACOMPRESS) -- ignored \n", @@ -1543,7 +2034,7 @@ FIO_compressFilename_internal(FIO_ctx_t* const fCtx, 
case FIO_lz4Compression: #ifdef ZSTD_LZ4COMPRESS - compressedfilesize = FIO_compressLz4Frame(&ress, srcFileName, fileSize, compressionLevel, prefs->checksumFlag, &readsize); + compressedfilesize = FIO_compressLz4Frame(ress, srcFileName, fileSize, compressionLevel, prefs->checksumFlag, &readsize); #else (void)compressionLevel; EXM_THROW(20, "zstd: %s: file cannot be compressed as lz4 (zstd compiled without ZSTD_LZ4COMPRESS) -- ignored \n", @@ -1555,20 +2046,18 @@ FIO_compressFilename_internal(FIO_ctx_t* const fCtx, /* Status */ fCtx->totalBytesInput += (size_t)readsize; fCtx->totalBytesOutput += (size_t)compressedfilesize; - DISPLAYLEVEL(2, "\r%79s\r", ""); - if (g_display_prefs.displayLevel >= 2 && - !fCtx->hasStdoutOutput && - (g_display_prefs.displayLevel >= 3 || fCtx->nbFilesTotal <= 1)) { + DISPLAY_PROGRESS("\r%79s\r", ""); + if (FIO_shouldDisplayFileSummary(fCtx)) { UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) readsize); UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) compressedfilesize); if (readsize == 0) { - DISPLAYLEVEL(2,"%-20s : (%6.*f%s => %6.*f%s, %s) \n", + DISPLAY_SUMMARY("%-20s : (%6.*f%s => %6.*f%s, %s) \n", srcFileName, hr_isize.precision, hr_isize.value, hr_isize.suffix, hr_osize.precision, hr_osize.value, hr_osize.suffix, dstFileName); } else { - DISPLAYLEVEL(2,"%-20s :%6.2f%% (%6.*f%s => %6.*f%s, %s) \n", + DISPLAY_SUMMARY("%-20s :%6.2f%% (%6.*f%s => %6.*f%s, %s) \n", srcFileName, (double)compressedfilesize / (double)readsize * 100, hr_isize.precision, hr_isize.value, hr_isize.suffix, @@ -1601,36 +2090,35 @@ FIO_compressFilename_internal(FIO_ctx_t* const fCtx, */ static int FIO_compressFilename_dstFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, - cRess_t ress, + cRess_t* ress, const char* dstFileName, const char* srcFileName, + const stat_t* srcFileStat, int compressionLevel) { int closeDstFile = 0; int result; - stat_t statbuf; - int transferMTime = 0; - FILE *dstFile; - assert(AIO_ReadPool_getFile(ress.readCtx) != NULL); - if (AIO_WritePool_getFile(ress.writeCtx) == NULL) { - int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; + int transferStat = 0; + int dstFd = -1; + + if (ress->io.dstFile == NULL) { + int dstFileInitialPermissions = DEFAULT_FILE_PERMISSIONS; if ( strcmp (srcFileName, stdinmark) && strcmp (dstFileName, stdoutmark) - && UTIL_stat(srcFileName, &statbuf) - && UTIL_isRegularFileStat(&statbuf) ) { - dstFilePermissions = statbuf.st_mode; - transferMTime = 1; + && UTIL_isRegularFileStat(srcFileStat) ) { + transferStat = 1; + dstFileInitialPermissions = TEMPORARY_FILE_PERMISSIONS; } closeDstFile = 1; DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: opening dst: %s \n", dstFileName); - dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); - if (dstFile==NULL) return 1; /* could not open dstFileName */ - AIO_WritePool_setFile(ress.writeCtx, dstFile); - /* Must only be added after FIO_openDstFile() succeeds. - * Otherwise we may delete the destination file if it already exists, - * and the user presses Ctrl-C when asked if they wish to overwrite. - */ + { + FILE *dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFileInitialPermissions); + if (dstFile==NULL) return 1; /* could not open dstFileName */ + dstFd = fileno(dstFile); + FIO_SyncCompressIO_setDst(&ress->io, dstFile); + } + /* Must only be added after FIO_openDstFile() succeeds. 
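+ * Otherwise we may delete the destination file if it already exists,
+ * and the user presses Ctrl-C when asked if they wish to overwrite.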
*/ addHandler(dstFileName); } @@ -1639,18 +2127,23 @@ static int FIO_compressFilename_dstFile(FIO_ctx_t* const fCtx, if (closeDstFile) { clearHandler(); + if (transferStat) { + UTIL_setFDStat(dstFd, dstFileName, srcFileStat); + } + DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: closing dst: %s \n", dstFileName); - if (AIO_WritePool_closeFile(ress.writeCtx)) { /* error closing file */ + if (FIO_SyncCompressIO_closeDst(&ress->io)) { /* error closing file */ DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result=1; } - if (transferMTime) { - UTIL_utime(dstFileName, &statbuf); + + if (transferStat) { + UTIL_utime(dstFileName, srcFileStat); } - if ( (result != 0) /* operation failure */ - && strcmp(dstFileName, stdoutmark) /* special case : don't remove() stdout */ - ) { - FIO_removeFile(dstFileName); /* remove compression artefact; note don't do anything special if remove() fails */ + + if ( (result != 0) + && strcmp(dstFileName, stdoutmark) ) { + FIO_removeFile(dstFileName); } } @@ -1670,6 +2163,110 @@ static const char *compressedFileExtensions[] = { TXZ_EXTENSION, LZ4_EXTENSION, TLZ4_EXTENSION, + ".7z", + ".aa3", + ".aac", + ".aar", + ".ace", + ".alac", + ".ape", + ".apk", + ".apng", + ".arc", + ".archive", + ".arj", + ".ark", + ".asf", + ".avi", + ".avif", + ".ba", + ".br", + ".bz2", + ".cab", + ".cdx", + ".chm", + ".cr2", + ".divx", + ".dmg", + ".dng", + ".docm", + ".docx", + ".dotm", + ".dotx", + ".dsft", + ".ear", + ".eftx", + ".emz", + ".eot", + ".epub", + ".f4v", + ".flac", + ".flv", + ".gho", + ".gif", + ".gifv", + ".gnp", + ".iso", + ".jar", + ".jpeg", + ".jpg", + ".jxl", + ".lz", + ".lzh", + ".m4a", + ".m4v", + ".mkv", + ".mov", + ".mp2", + ".mp3", + ".mp4", + ".mpa", + ".mpc", + ".mpe", + ".mpeg", + ".mpg", + ".mpl", + ".mpv", + ".msi", + ".odp", + ".ods", + ".odt", + ".ogg", + ".ogv", + ".otp", + ".ots", + ".ott", + ".pea", + ".png", + ".pptx", + ".qt", + ".rar", + ".s7z", + ".sfx", + ".sit", + ".sitx", + ".sqx", + ".svgz", + ".swf", + ".tbz2", + ".tib", + ".tlz", + ".vob", + ".war", + ".webm", + ".webp", + ".wma", + ".wmv", + ".woff", + ".woff2", + ".wvl", + ".xlsx", + ".xpi", + ".xps", + ".zip", + ".zipx", + ".zoo", + ".zpaq", NULL }; @@ -1680,25 +2277,33 @@ static const char *compressedFileExtensions[] = { static int FIO_compressFilename_srcFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, - cRess_t ress, + cRess_t* ress, const char* dstFileName, const char* srcFileName, int compressionLevel) { int result; FILE* srcFile; + stat_t srcFileStat; + U64 fileSize = UTIL_FILESIZE_UNKNOWN; DISPLAYLEVEL(6, "FIO_compressFilename_srcFile: %s \n", srcFileName); - /* ensure src is not a directory */ - if (UTIL_isDirectory(srcFileName)) { - DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); - return 1; - } + if (strcmp(srcFileName, stdinmark)) { + if (UTIL_stat(srcFileName, &srcFileStat)) { + /* failure to stat at all is handled during opening */ - /* ensure src is not the same as dict (if present) */ - if (ress.dictFileName != NULL && UTIL_isSameFile(srcFileName, ress.dictFileName)) { - DISPLAYLEVEL(1, "zstd: cannot use %s as an input file and dictionary \n", srcFileName); - return 1; + /* ensure src is not a directory */ + if (UTIL_isDirectoryStat(&srcFileStat)) { + DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); + return 1; + } + + /* ensure src is not the same as dict (if present) */ + if (ress->dictFileName != NULL && UTIL_isSameFileStat(srcFileName, ress->dictFileName, &srcFileStat, &ress->dictFileStat)) { + 
DISPLAYLEVEL(1, "zstd: cannot use %s as an input file and dictionary \n", srcFileName); + return 1; + } + } } /* Check if "srcFile" is compressed. Only done if --exclude-compressed flag is used @@ -1710,16 +2315,28 @@ FIO_compressFilename_srcFile(FIO_ctx_t* const fCtx, return 0; } - srcFile = FIO_openSrcFile(prefs, srcFileName); + srcFile = FIO_openSrcFile(prefs, srcFileName, &srcFileStat); if (srcFile == NULL) return 1; /* srcFile could not be opened */ - AIO_ReadPool_setFile(ress.readCtx, srcFile); - result = FIO_compressFilename_dstFile(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); - AIO_ReadPool_closeFile(ress.readCtx); + if (strcmp(srcFileName, stdinmark)) /* Stdin doesn't have stats */ + fileSize = UTIL_getFileSizeStat(&srcFileStat); + (void)fileSize; + + FIO_SyncCompressIO_setSrc(&ress->io, srcFile); + result = FIO_compressFilename_dstFile( + fCtx, prefs, ress, + dstFileName, srcFileName, + &srcFileStat, compressionLevel); + FIO_SyncCompressIO_clearSrc(&ress->io); + + if (srcFile != NULL && fclose(srcFile)) { + DISPLAYLEVEL(1, "zstd: %s: %s \n", srcFileName, strerror(errno)); + return 1; + } - if ( prefs->removeSrcFile /* --rm */ - && result == 0 /* success */ - && strcmp(srcFileName, stdinmark) /* exception : don't erase stdin */ + if ( prefs->removeSrcFile /* --rm */ + && result == 0 /* success */ + && strcmp(srcFileName, stdinmark) /* exception : don't erase stdin */ ) { /* We must clear the handler, since after this point calling it would * delete both the source and destination files. @@ -1741,7 +2358,8 @@ checked_index(const char* options[], size_t length, size_t index) { #define INDEX(options, index) checked_index((options), sizeof(options) / sizeof(char*), (size_t)(index)) -void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) { +void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) +{ static const char* formatOptions[5] = {ZSTD_EXTENSION, GZ_EXTENSION, XZ_EXTENSION, LZMA_EXTENSION, LZ4_EXTENSION}; static const char* sparseOptions[3] = {" --no-sparse", "", " --sparse"}; @@ -1755,7 +2373,7 @@ void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) { DISPLAY("%s", INDEX(sparseOptions, prefs->sparseFileSupport)); DISPLAY("%s", prefs->dictIDFlag ? 
"" : " --no-dictID"); DISPLAY("%s", INDEX(checkSumOptions, prefs->checksumFlag)); - DISPLAY(" --block-size=%d", prefs->blockSize); + DISPLAY(" --jobsize=%d", prefs->jobSize); if (prefs->adaptiveMode) DISPLAY(" --adapt=min=%d,max=%d", prefs->minAdaptLevel, prefs->maxAdaptLevel); DISPLAY("%s", INDEX(rowMatchFinderOptions, prefs->useRowMatchFinder)); @@ -1780,10 +2398,12 @@ int FIO_compressFilename(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, const const char* srcFileName, const char* dictFileName, int compressionLevel, ZSTD_compressionParameters comprParams) { - cRess_t const ress = FIO_createCResources(prefs, dictFileName, UTIL_getFileSize(srcFileName), compressionLevel, comprParams); - int const result = FIO_compressFilename_srcFile(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); + cRess_t ress = FIO_createCResources(prefs, dictFileName, UTIL_getFileSize(srcFileName), compressionLevel, comprParams); + int const result = FIO_compressFilename_srcFile(fCtx, prefs, &ress, dstFileName, srcFileName, compressionLevel); -#define DISPLAY_LEVEL_DEFAULT 2 +#ifndef ZSTD_DISPLAY_LEVEL_DEFAULT +# define ZSTD_DISPLAY_LEVEL_DEFAULT 2 +#endif FIO_freeCResources(&ress); return result; @@ -1870,7 +2490,7 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, assert(outFileName != NULL || suffix != NULL); if (outFileName != NULL) { /* output into a single destination (stdout typically) */ FILE *dstFile; - if (FIO_removeMultiFilesWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { + if (FIO_multiFilesConcatWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { FIO_freeCResources(&ress); return 1; } @@ -1878,13 +2498,13 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, if (dstFile == NULL) { /* could not open outFileName */ error = 1; } else { - AIO_WritePool_setFile(ress.writeCtx, dstFile); + FIO_SyncCompressIO_setDst(&ress.io, dstFile); for (; fCtx->currFileIdx < fCtx->nbFilesTotal; ++fCtx->currFileIdx) { - status = FIO_compressFilename_srcFile(fCtx, prefs, ress, outFileName, inFileNamesTable[fCtx->currFileIdx], compressionLevel); + status = FIO_compressFilename_srcFile(fCtx, prefs, &ress, outFileName, inFileNamesTable[fCtx->currFileIdx], compressionLevel); if (!status) fCtx->nbFilesProcessed++; error |= status; } - if (AIO_WritePool_closeFile(ress.writeCtx)) + if (FIO_SyncCompressIO_closeDst(&ress.io)) EXM_THROW(29, "Write error (%s) : cannot properly close %s", strerror(errno), outFileName); } @@ -1908,7 +2528,7 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, } else { dstFileName = FIO_determineCompressedName(srcFileName, outDirName, suffix); /* cannot fail */ } - status = FIO_compressFilename_srcFile(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); + status = FIO_compressFilename_srcFile(fCtx, prefs, &ress, dstFileName, srcFileName, compressionLevel); if (!status) fCtx->nbFilesProcessed++; error |= status; } @@ -1917,16 +2537,23 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, FIO_checkFilenameCollisions(inFileNamesTable , (unsigned)fCtx->nbFilesTotal); } - if (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1 && fCtx->totalBytesInput != 0) { + if (FIO_shouldDisplayMultipleFileSummary(fCtx)) { UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesInput); UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesOutput); - DISPLAYLEVEL(2, "\r%79s\r", ""); - DISPLAYLEVEL(2, "%3d files compressed :%.2f%% (%6.*f%4s => %6.*f%4s)\n", - 
fCtx->nbFilesProcessed, - (double)fCtx->totalBytesOutput/((double)fCtx->totalBytesInput)*100, - hr_isize.precision, hr_isize.value, hr_isize.suffix, - hr_osize.precision, hr_osize.value, hr_osize.suffix); + DISPLAY_PROGRESS("\r%79s\r", ""); + if (fCtx->totalBytesInput == 0) { + DISPLAY_SUMMARY("%3d files compressed : (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); + } else { + DISPLAY_SUMMARY("%3d files compressed : %.2f%% (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, + (double)fCtx->totalBytesOutput/((double)fCtx->totalBytesInput)*100, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); + } } FIO_freeCResources(&ress); @@ -1943,6 +2570,7 @@ int FIO_compressMultipleFilenames(FIO_ctx_t* const fCtx, * Decompression ***************************************************************************/ typedef struct { + FIO_Dict_t dict; ZSTD_DStream* dctx; WritePoolCtx_t *writeCtx; ReadPoolCtx_t *readCtx; @@ -1950,11 +2578,20 @@ typedef struct { static dRess_t FIO_createDResources(FIO_prefs_t* const prefs, const char* dictFileName) { + int useMMap = prefs->mmapDict == ZSTD_ps_enable; + int forceNoUseMMap = prefs->mmapDict == ZSTD_ps_disable; + stat_t statbuf; dRess_t ress; + memset(&statbuf, 0, sizeof(statbuf)); memset(&ress, 0, sizeof(ress)); - if (prefs->patchFromMode) - FIO_adjustMemLimitForPatchFromMode(prefs, UTIL_getFileSize(dictFileName), 0 /* just use the dict size */); + FIO_getDictFileStat(dictFileName, &statbuf); + + if (prefs->patchFromMode){ + U64 const dictSize = UTIL_getFileSizeStat(&statbuf); + useMMap |= dictSize > prefs->memLimit; + FIO_adjustMemLimitForPatchFromMode(prefs, dictSize, 0 /* just use the dict size */); + } /* Allocation */ ress.dctx = ZSTD_createDStream(); @@ -1964,27 +2601,34 @@ static dRess_t FIO_createDResources(FIO_prefs_t* const prefs, const char* dictFi CHECK( ZSTD_DCtx_setParameter(ress.dctx, ZSTD_d_forceIgnoreChecksum, !prefs->checksumFlag)); /* dictionary */ - { void* dictBuffer; - size_t const dictBufferSize = FIO_createDictBuffer(&dictBuffer, dictFileName, prefs); - CHECK( ZSTD_initDStream_usingDict(ress.dctx, dictBuffer, dictBufferSize) ); - free(dictBuffer); + { + FIO_dictBufferType_t dictBufferType = (useMMap && !forceNoUseMMap) ? 
FIO_mmapDict : FIO_mallocDict; + FIO_initDict(&ress.dict, dictFileName, prefs, &statbuf, dictBufferType); + + CHECK(ZSTD_DCtx_reset(ress.dctx, ZSTD_reset_session_only) ); + + if (prefs->patchFromMode){ + CHECK(ZSTD_DCtx_refPrefix(ress.dctx, ress.dict.dictBuffer, ress.dict.dictBufferSize)); + } else { + CHECK(ZSTD_DCtx_loadDictionary_byReference(ress.dctx, ress.dict.dictBuffer, ress.dict.dictBufferSize)); + } } ress.writeCtx = AIO_WritePool_create(prefs, ZSTD_DStreamOutSize()); ress.readCtx = AIO_ReadPool_create(prefs, ZSTD_DStreamInSize()); - return ress; } static void FIO_freeDResources(dRess_t ress) { + FIO_freeDict(&(ress.dict)); CHECK( ZSTD_freeDStream(ress.dctx) ); AIO_WritePool_free(ress.writeCtx); AIO_ReadPool_free(ress.readCtx); } -/** FIO_passThrough() : just copy input into output, for compatibility with gzip -df mode - @return : 0 (no error) */ +/* FIO_passThrough() : just copy input into output, for compatibility with gzip -df mode + * @return : 0 (no error) */ static int FIO_passThrough(dRess_t *ress) { size_t const blockSize = MIN(MIN(64 KB, ZSTD_DStreamInSize()), ZSTD_DStreamOutSize()); @@ -2012,9 +2656,10 @@ static int FIO_passThrough(dRess_t *ress) static void FIO_zstdErrorHelp(const FIO_prefs_t* const prefs, const dRess_t* ress, - size_t err, const char* srcFileName) + size_t err, + const char* srcFileName) { - ZSTD_frameHeader header; + ZSTD_FrameHeader header; /* Help message only for one specific error */ if (ZSTD_getErrorCode(err) != ZSTD_error_frameParameter_windowTooLarge) @@ -2050,11 +2695,14 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, U64 alreadyDecoded) /* for multi-frames streams */ { U64 frameSize = 0; - IOJob_t *writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + const char* srcFName20 = srcFileName; + IOJob_t* writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + assert(writeJob); - /* display last 20 characters only */ + /* display last 20 characters only when not --verbose */ { size_t const srcFileLength = strlen(srcFileName); - if (srcFileLength>20) srcFileName += srcFileLength-20; + if ((srcFileLength>20) && (g_display_prefs.displayLevel<3)) + srcFName20 += srcFileLength-20; } ZSTD_DCtx_reset(ress->dctx, ZSTD_reset_session_only); @@ -2064,10 +2712,9 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, /* Main decompression Loop */ while (1) { - ZSTD_inBuffer inBuff = { ress->readCtx->srcBuffer, ress->readCtx->srcBufferLoaded, 0 }; - ZSTD_outBuffer outBuff= { writeJob->buffer, writeJob->bufferSize, 0 }; + ZSTD_inBuffer inBuff = setInBuffer( ress->readCtx->srcBuffer, ress->readCtx->srcBufferLoaded, 0 ); + ZSTD_outBuffer outBuff= setOutBuffer( writeJob->buffer, writeJob->bufferSize, 0 ); size_t const readSizeHint = ZSTD_decompressStream(ress->dctx, &outBuff, &inBuff); - const int displayLevel = (g_display_prefs.progressSetting == FIO_ps_always) ? 1 : 2; UTIL_HumanReadableSize_t const hrs = UTIL_makeHumanReadableSize(alreadyDecoded+frameSize); if (ZSTD_isError(readSizeHint)) { DISPLAYLEVEL(1, "%s : Decoding error (36) : %s \n", @@ -2082,18 +2729,12 @@ FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); frameSize += outBuff.pos; if (fCtx->nbFilesTotal > 1) { - size_t srcFileNameSize = strlen(srcFileName); - if (srcFileNameSize > 18) { - const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; - DISPLAYUPDATE(displayLevel, "\rDecompress: %2u/%2u files. Current: ...%s : %.*f%s... 
", - fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName, hrs.precision, hrs.value, hrs.suffix); - } else { - DISPLAYUPDATE(displayLevel, "\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ", - fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFileName, hrs.precision, hrs.value, hrs.suffix); - } + DISPLAYUPDATE_PROGRESS( + "\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ", + fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFName20, hrs.precision, hrs.value, hrs.suffix); } else { - DISPLAYUPDATE(displayLevel, "\r%-20.20s : %.*f%s... ", - srcFileName, hrs.precision, hrs.value, hrs.suffix); + DISPLAYUPDATE_PROGRESS("\r%-20.20s : %.*f%s... ", + srcFName20, hrs.precision, hrs.value, hrs.suffix); } AIO_ReadPool_consumeBytes(ress->readCtx, inBuff.pos); @@ -2134,7 +2775,7 @@ FIO_decompressGzFrame(dRess_t* ress, const char* srcFileName) strm.opaque = Z_NULL; strm.next_in = 0; strm.avail_in = 0; - /* see http://www.zlib.net/manual.html */ + /* see https://www.zlib.net/manual.html */ if (inflateInit2(&strm, 15 /* maxWindowLogSize */ + 16 /* gzip only */) != Z_OK) return FIO_ERROR_FRAME_DECODING; @@ -2214,8 +2855,8 @@ FIO_decompressLzmaFrame(dRess_t* ress, } writeJob = AIO_WritePool_acquireJob(ress->writeCtx); - strm.next_out = (Bytef*)writeJob->buffer; - strm.avail_out = (uInt)writeJob->bufferSize; + strm.next_out = (BYTE*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; strm.avail_in = ress->readCtx->srcBufferLoaded; @@ -2243,7 +2884,7 @@ FIO_decompressLzmaFrame(dRess_t* ress, writeJob->usedBufferSize = decompBytes; AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += decompBytes; - strm.next_out = (Bytef*)writeJob->buffer; + strm.next_out = (BYTE*)writeJob->buffer; strm.avail_out = writeJob->bufferSize; } } if (ret == LZMA_STREAM_END) break; @@ -2307,7 +2948,7 @@ FIO_decompressLz4Frame(dRess_t* ress, const char* srcFileName) AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); filesize += decodedBytes; hrs = UTIL_makeHumanReadableSize(filesize); - DISPLAYUPDATE(2, "\rDecompressed : %.*f%s ", hrs.precision, hrs.value, hrs.suffix); + DISPLAYUPDATE_PROGRESS("\rDecompressed : %.*f%s ", hrs.precision, hrs.value, hrs.suffix); } if (!nextToLoad) break; @@ -2415,13 +3056,9 @@ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, /* Final Status */ fCtx->totalBytesOutput += (size_t)filesize; - DISPLAYLEVEL(2, "\r%79s\r", ""); - /* No status message in pipe mode (stdin - stdout) or multi-files mode */ - if ((g_display_prefs.displayLevel >= 2 && fCtx->nbFilesTotal <= 1) || - g_display_prefs.displayLevel >= 3 || - g_display_prefs.progressSetting == FIO_ps_always) { - DISPLAYLEVEL(1, "\r%-20s: %llu bytes \n", srcFileName, filesize); - } + DISPLAY_PROGRESS("\r%79s\r", ""); + if (FIO_shouldDisplayFileSummary(fCtx)) + DISPLAY_SUMMARY("%-20s: %llu bytes \n", srcFileName, filesize); return 0; } @@ -2435,28 +3072,30 @@ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, dRess_t ress, - const char* dstFileName, const char* srcFileName) + const char* dstFileName, + const char* srcFileName, + const stat_t* srcFileStat) { int result; - stat_t statbuf; int releaseDstFile = 0; - int transferMTime = 0; + int transferStat = 0; + int dstFd = 0; if ((AIO_WritePool_getFile(ress.writeCtx) == NULL) && (prefs->testMode == 0)) { FILE *dstFile; int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; if ( strcmp(srcFileName, stdinmark) /* special case : don't 
transfer permissions from stdin */ && strcmp(dstFileName, stdoutmark) - && UTIL_stat(srcFileName, &statbuf) - && UTIL_isRegularFileStat(&statbuf) ) { - dstFilePermissions = statbuf.st_mode; - transferMTime = 1; + && UTIL_isRegularFileStat(srcFileStat) ) { + transferStat = 1; + dstFilePermissions = TEMPORARY_FILE_PERMISSIONS; } releaseDstFile = 1; dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); if (dstFile==NULL) return 1; + dstFd = fileno(dstFile); AIO_WritePool_setFile(ress.writeCtx, dstFile); /* Must only be added after FIO_openDstFile() succeeds. @@ -2470,13 +3109,18 @@ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, if (releaseDstFile) { clearHandler(); + + if (transferStat) { + UTIL_setFDStat(dstFd, dstFileName, srcFileStat); + } + if (AIO_WritePool_closeFile(ress.writeCtx)) { DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result = 1; } - if (transferMTime) { - UTIL_utime(dstFileName, &statbuf); + if (transferStat) { + UTIL_utime(dstFileName, srcFileStat); } if ( (result != 0) /* operation failure */ @@ -2498,18 +3142,32 @@ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, static int FIO_decompressSrcFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, dRess_t ress, const char* dstFileName, const char* srcFileName) { FILE* srcFile; + stat_t srcFileStat; int result; + U64 fileSize = UTIL_FILESIZE_UNKNOWN; if (UTIL_isDirectory(srcFileName)) { DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); return 1; } - srcFile = FIO_openSrcFile(prefs, srcFileName); + srcFile = FIO_openSrcFile(prefs, srcFileName, &srcFileStat); if (srcFile==NULL) return 1; + + /* Don't use AsyncIO for small files */ + if (strcmp(srcFileName, stdinmark)) /* Stdin doesn't have stats */ + fileSize = UTIL_getFileSizeStat(&srcFileStat); + if(fileSize != UTIL_FILESIZE_UNKNOWN && fileSize < ZSTD_BLOCKSIZE_MAX * 3) { + AIO_ReadPool_setAsync(ress.readCtx, 0); + AIO_WritePool_setAsync(ress.writeCtx, 0); + } else { + AIO_ReadPool_setAsync(ress.readCtx, 1); + AIO_WritePool_setAsync(ress.writeCtx, 1); + } + AIO_ReadPool_setFile(ress.readCtx, srcFile); - result = FIO_decompressDstFile(fCtx, prefs, ress, dstFileName, srcFileName); + result = FIO_decompressDstFile(fCtx, prefs, ress, dstFileName, srcFileName, &srcFileStat); AIO_ReadPool_setFile(ress.readCtx, NULL); @@ -2543,6 +3201,8 @@ int FIO_decompressFilename(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, int const decodingError = FIO_decompressSrcFile(fCtx, prefs, ress, dstFileName, srcFileName); + + FIO_freeDResources(ress); return decodingError; } @@ -2686,7 +3346,7 @@ FIO_decompressMultipleFilenames(FIO_ctx_t* const fCtx, dRess_t ress = FIO_createDResources(prefs, dictFileName); if (outFileName) { - if (FIO_removeMultiFilesWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { + if (FIO_multiFilesConcatWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { FIO_freeDResources(ress); return 1; } @@ -2730,8 +3390,11 @@ FIO_decompressMultipleFilenames(FIO_ctx_t* const fCtx, FIO_checkFilenameCollisions(srcNamesTable , (unsigned)fCtx->nbFilesTotal); } - if (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1 && fCtx->totalBytesOutput != 0) - DISPLAYLEVEL(2, "%d files decompressed : %6zu bytes total \n", fCtx->nbFilesProcessed, fCtx->totalBytesOutput); + if (FIO_shouldDisplayMultipleFileSummary(fCtx)) { + DISPLAY_PROGRESS("\r%79s\r", ""); + DISPLAY_SUMMARY("%d files decompressed : %6llu bytes total \n", + fCtx->nbFilesProcessed, (unsigned long 
long)fCtx->totalBytesOutput); + } FIO_freeDResources(ress); return error; @@ -2749,6 +3412,7 @@ typedef struct { int numSkippableFrames; int decompUnavailable; int usesCheck; + BYTE checksum[4]; U32 nbFiles; unsigned dictID; } fileInfo_t; @@ -2758,7 +3422,7 @@ typedef enum { info_frame_error=1, info_not_zstd=2, info_file_error=3, - info_truncated_input=4, + info_truncated_input=4 } InfoError; #define ERROR_IF(c,n,...) { \ @@ -2795,7 +3459,7 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile) { U32 const magicNumber = MEM_readLE32(headerBuffer); /* Zstandard frame */ if (magicNumber == ZSTD_MAGICNUMBER) { - ZSTD_frameHeader header; + ZSTD_FrameHeader header; U64 const frameContentSize = ZSTD_getFrameContentSize(headerBuffer, numBytesRead); if ( frameContentSize == ZSTD_CONTENTSIZE_ERROR || frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN ) { @@ -2843,8 +3507,8 @@ FIO_analyzeFrames(fileInfo_t* info, FILE* const srcFile) int const contentChecksumFlag = (frameHeaderDescriptor & (1 << 2)) >> 2; if (contentChecksumFlag) { info->usesCheck = 1; - ERROR_IF(fseek(srcFile, 4, SEEK_CUR) != 0, - info_frame_error, "Error: could not skip past checksum"); + ERROR_IF(fread(info->checksum, 1, 4, srcFile) != 4, + info_frame_error, "Error: could not read checksum"); } } info->numActualFrames++; } @@ -2870,10 +3534,11 @@ static InfoError getFileInfo_fileConfirmed(fileInfo_t* info, const char* inFileName) { InfoError status; - FILE* const srcFile = FIO_openSrcFile(NULL, inFileName); + stat_t srcFileStat; + FILE* const srcFile = FIO_openSrcFile(NULL, inFileName, &srcFileStat); ERROR_IF(srcFile == NULL, info_file_error, "Error: could not open source file %s", inFileName); - info->compressedSize = UTIL_getFileSize(inFileName); + info->compressedSize = UTIL_getFileSizeStat(&srcFileStat); status = FIO_analyzeFrames(info, srcFile); fclose(srcFile); @@ -2936,7 +3601,16 @@ displayInfo(const char* inFileName, const fileInfo_t* info, int displayLevel) (unsigned long long)info->decompressedSize); DISPLAYOUT("Ratio: %.4f\n", ratio); } - DISPLAYOUT("Check: %s\n", checkString); + + if (info->usesCheck && info->numActualFrames == 1) { + DISPLAYOUT("Check: %s %02x%02x%02x%02x\n", checkString, + info->checksum[3], info->checksum[2], + info->checksum[1], info->checksum[0] + ); + } else { + DISPLAYOUT("Check: %s\n", checkString); + } + DISPLAYOUT("\n"); } } @@ -3000,7 +3674,7 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis } } if (numFiles == 0) { - if (!IS_CONSOLE(stdin)) { + if (!UTIL_isConsole(stdin)) { DISPLAYLEVEL(1, "zstd: --list does not support reading from standard input \n"); } DISPLAYLEVEL(1, "No files given \n"); diff --git a/programs/fileio.h b/programs/fileio.h index b848934bcae..5d7334ef5f0 100644 --- a/programs/fileio.h +++ b/programs/fileio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -17,11 +17,6 @@ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ #include "../lib/zstd.h" /* ZSTD_* */ -#if defined (__cplusplus) -extern "C" { -#endif - - /* ************************************* * Special i/o constants **************************************/ @@ -75,7 +70,7 @@ void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, int adapt); void FIO_setAdaptMin(FIO_prefs_t* const prefs, int minCLevel); void FIO_setAdaptMax(FIO_prefs_t* const prefs, int maxCLevel); void FIO_setUseRowMatchFinder(FIO_prefs_t* const prefs, int useRowMatchFinder); -void FIO_setBlockSize(FIO_prefs_t* const prefs, int blockSize); +void FIO_setJobSize(FIO_prefs_t* const prefs, int jobSize); void FIO_setChecksumFlag(FIO_prefs_t* const prefs, int checksumFlag); void FIO_setDictIDFlag(FIO_prefs_t* const prefs, int dictIDFlag); void FIO_setLdmBucketSizeLog(FIO_prefs_t* const prefs, int ldmBucketSizeLog); @@ -86,7 +81,7 @@ void FIO_setLdmMinMatch(FIO_prefs_t* const prefs, int ldmMinMatch); void FIO_setMemLimit(FIO_prefs_t* const prefs, unsigned memLimit); void FIO_setNbWorkers(FIO_prefs_t* const prefs, int nbWorkers); void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog); -void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag); +void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, int flag); void FIO_setSparseWrite(FIO_prefs_t* const prefs, int sparse); /**< 0: no sparse; 1: disable on stdout; 2: always enabled */ void FIO_setRsyncable(FIO_prefs_t* const prefs, int rsyncable); void FIO_setStreamSrcSize(FIO_prefs_t* const prefs, size_t streamSrcSize); @@ -95,7 +90,7 @@ void FIO_setSrcSizeHint(FIO_prefs_t* const prefs, size_t srcSizeHint); void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode); void FIO_setLiteralCompressionMode( FIO_prefs_t* const prefs, - ZSTD_paramSwitch_e mode); + ZSTD_ParamSwitch_e mode); void FIO_setProgressSetting(FIO_progressSetting_e progressSetting); void FIO_setNotificationLevel(int level); @@ -106,6 +101,7 @@ void FIO_setContentSize(FIO_prefs_t* const prefs, int value); void FIO_displayCompressionParameters(const FIO_prefs_t* prefs); void FIO_setAsyncIOFlag(FIO_prefs_t* const prefs, int value); void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value); +void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_ParamSwitch_e value); /* FIO_ctx_t functions */ void FIO_setNbFilesTotal(FIO_ctx_t* const fCtx, int value); @@ -172,9 +168,4 @@ char const* FIO_zlibVersion(void); char const* FIO_lz4Version(void); char const* FIO_lzmaVersion(void); - -#if defined (__cplusplus) -} -#endif - #endif /* FILEIO_H_23981798732 */ diff --git a/programs/fileio_asyncio.c b/programs/fileio_asyncio.c index 92c9a5b1d35..42a47201656 100644 --- a/programs/fileio_asyncio.c +++ b/programs/fileio_asyncio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -140,7 +140,7 @@ int AIO_supported(void) { } /* *********************************** - * General IoPool implementation + * Generic IoPool implementation *************************************/ static IOJob_t *AIO_IOPool_createIoJob(IOPoolCtx_t *ctx, size_t bufferSize) { @@ -163,20 +163,22 @@ static IOJob_t *AIO_IOPool_createIoJob(IOPoolCtx_t *ctx, size_t bufferSize) { * Displays warning if asyncio is requested but MT isn't available. 
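 * Note : when asyncIO is disabled, ctx->threadPool stays NULL and jobs are
 * executed inline by the calling thread (see AIO_IOPool_enqueueJob below),
 * so the mutex and condition-variable paths are skipped entirely.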
*/ static void AIO_IOPool_createThreadPool(IOPoolCtx_t* ctx, const FIO_prefs_t* prefs) { ctx->threadPool = NULL; + ctx->threadPoolActive = 0; if(prefs->asyncIO) { if (ZSTD_pthread_mutex_init(&ctx->ioJobsMutex, NULL)) - EXM_THROW(102,"Failed creating write availableJobs mutex"); + EXM_THROW(102,"Failed creating ioJobsMutex"); /* We want MAX_IO_JOBS-2 queue items because we need to always have 1 free buffer to * decompress into and 1 buffer that's actively written to disk and owned by the writing thread. */ assert(MAX_IO_JOBS >= 2); ctx->threadPool = POOL_create(1, MAX_IO_JOBS - 2); + ctx->threadPoolActive = 1; if (!ctx->threadPool) - EXM_THROW(104, "Failed creating writer thread pool"); + EXM_THROW(104, "Failed creating I/O thread pool"); } } /* AIO_IOPool_init: - * Allocates and sets and a new write pool including its included availableJobs. */ + * Allocates and sets up a new I/O thread pool, including its availableJobs. */ static void AIO_IOPool_init(IOPoolCtx_t* ctx, const FIO_prefs_t* prefs, POOL_function poolFunction, size_t bufferSize) { int i; AIO_IOPool_createThreadPool(ctx, prefs); @@ -192,27 +194,59 @@ } +/* AIO_IOPool_threadPoolActive: + * Checks whether the current operation uses the thread pool. + * Note that in some cases we have a thread pool initialized but choose not to use it. */ +static int AIO_IOPool_threadPoolActive(IOPoolCtx_t* ctx) { + return ctx->threadPool && ctx->threadPoolActive; +} + + +/* AIO_IOPool_lockJobsMutex: + * Locks the IO jobs mutex if threading is active */ +static void AIO_IOPool_lockJobsMutex(IOPoolCtx_t* ctx) { + if(AIO_IOPool_threadPoolActive(ctx)) + ZSTD_pthread_mutex_lock(&ctx->ioJobsMutex); +} + +/* AIO_IOPool_unlockJobsMutex: + * Unlocks the IO jobs mutex if threading is active */ +static void AIO_IOPool_unlockJobsMutex(IOPoolCtx_t* ctx) { + if(AIO_IOPool_threadPoolActive(ctx)) + ZSTD_pthread_mutex_unlock(&ctx->ioJobsMutex); +} + /* AIO_IOPool_releaseIoJob: * Releases an acquired job back to the pool. Doesn't execute the job. */ static void AIO_IOPool_releaseIoJob(IOJob_t* job) { IOPoolCtx_t* const ctx = (IOPoolCtx_t *) job->ctx; - if(ctx->threadPool) - ZSTD_pthread_mutex_lock(&ctx->ioJobsMutex); + AIO_IOPool_lockJobsMutex(ctx); assert(ctx->availableJobsCount < ctx->totalIoJobs); ctx->availableJobs[ctx->availableJobsCount++] = job; - if(ctx->threadPool) - ZSTD_pthread_mutex_unlock(&ctx->ioJobsMutex); + AIO_IOPool_unlockJobsMutex(ctx); } /* AIO_IOPool_join: * Waits for all tasks in the pool to finish executing. */ static void AIO_IOPool_join(IOPoolCtx_t* ctx) { - if(ctx->threadPool) + if(AIO_IOPool_threadPoolActive(ctx)) POOL_joinJobs(ctx->threadPool); } +/* AIO_IOPool_setThreaded: + * Allows (de)activating threaded mode, to be used when the expected overhead + * of threading costs more than the expected gains. */ +static void AIO_IOPool_setThreaded(IOPoolCtx_t* ctx, int threaded) { + assert(threaded == 0 || threaded == 1); + assert(ctx != NULL); + if(ctx->threadPoolActive != threaded) { + AIO_IOPool_join(ctx); + ctx->threadPoolActive = threaded; + } +} + /* AIO_IOPool_free: - * Release a previously allocated write thread pool. Makes sure all takss are done and released. */ + * Releases a previously allocated IO thread pool. Makes sure all tasks are done and released.
*/ static void AIO_IOPool_destroy(IOPoolCtx_t* ctx) { int i; if(ctx->threadPool) { @@ -234,14 +268,12 @@ static void AIO_IOPool_destroy(IOPoolCtx_t* ctx) { /* AIO_IOPool_acquireJob: * Returns an available io job to be used for a future io. */ static IOJob_t* AIO_IOPool_acquireJob(IOPoolCtx_t* ctx) { - IOJob_t *job; + IOJob_t* job; assert(ctx->file != NULL || ctx->prefs->testMode); - if(ctx->threadPool) - ZSTD_pthread_mutex_lock(&ctx->ioJobsMutex); + AIO_IOPool_lockJobsMutex(ctx); assert(ctx->availableJobsCount > 0); job = (IOJob_t*) ctx->availableJobs[--ctx->availableJobsCount]; - if(ctx->threadPool) - ZSTD_pthread_mutex_unlock(&ctx->ioJobsMutex); + AIO_IOPool_unlockJobsMutex(ctx); job->usedBufferSize = 0; job->file = ctx->file; job->offset = 0; @@ -251,8 +283,7 @@ static IOJob_t* AIO_IOPool_acquireJob(IOPoolCtx_t* ctx) { /* AIO_IOPool_setFile: * Sets the destination file for future files in the pool. - * Requires completion of all queues write jobs and release of all otherwise acquired jobs. - * Also requires ending of sparse write if a previous file was used in sparse mode. */ + * Requires completion of all queued jobs and release of all otherwise acquired jobs. */ static void AIO_IOPool_setFile(IOPoolCtx_t* ctx, FILE* file) { assert(ctx!=NULL); AIO_IOPool_join(ctx); @@ -269,7 +300,7 @@ static FILE* AIO_IOPool_getFile(const IOPoolCtx_t* ctx) { * The queued job shouldn't be used directly after queueing it. */ static void AIO_IOPool_enqueueJob(IOJob_t* job) { IOPoolCtx_t* const ctx = (IOPoolCtx_t *)job->ctx; - if(ctx->threadPool) + if(AIO_IOPool_threadPoolActive(ctx)) POOL_add(ctx->threadPool, ctx->poolFunction, job); else ctx->poolFunction(job); @@ -300,8 +331,7 @@ void AIO_WritePool_enqueueAndReacquireWriteJob(IOJob_t **job) { * Blocks on completion of all current write jobs before executing. */ void AIO_WritePool_sparseWriteEnd(WritePoolCtx_t* ctx) { assert(ctx != NULL); - if(ctx->base.threadPool) - POOL_joinJobs(ctx->base.threadPool); + AIO_IOPool_join(&ctx->base); AIO_fwriteSparseEnd(ctx->base.prefs, ctx->base.file, ctx->storedSkips); ctx->storedSkips = 0; } @@ -368,6 +398,13 @@ void AIO_WritePool_free(WritePoolCtx_t* ctx) { free(ctx); } +/* AIO_WritePool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_WritePool_setAsync(WritePoolCtx_t* ctx, int async) { + AIO_IOPool_setThreaded(&ctx->base, async); +} + /* *********************************** * ReadPool implementation @@ -383,14 +420,13 @@ static void AIO_ReadPool_releaseAllCompletedJobs(ReadPoolCtx_t* ctx) { static void AIO_ReadPool_addJobToCompleted(IOJob_t* job) { ReadPoolCtx_t* const ctx = (ReadPoolCtx_t *)job->ctx; - if(ctx->base.threadPool) - ZSTD_pthread_mutex_lock(&ctx->base.ioJobsMutex); + AIO_IOPool_lockJobsMutex(&ctx->base); assert(ctx->completedJobsCount < MAX_IO_JOBS); ctx->completedJobs[ctx->completedJobsCount++] = job; - if(ctx->base.threadPool) { + if(AIO_IOPool_threadPoolActive(&ctx->base)) { ZSTD_pthread_cond_signal(&ctx->jobCompletedCond); - ZSTD_pthread_mutex_unlock(&ctx->base.ioJobsMutex); } + AIO_IOPool_unlockJobsMutex(&ctx->base); } /* AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked: @@ -417,8 +453,8 @@ static IOJob_t* AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ReadPoolCt /* AIO_ReadPool_numReadsInFlight: * Returns the number of IO read jobs currently in flight. */ static size_t AIO_ReadPool_numReadsInFlight(ReadPoolCtx_t* ctx) { - const size_t jobsHeld = (ctx->currentJobHeld==NULL ? 
0 : 1); - return ctx->base.totalIoJobs - (ctx->base.availableJobsCount + ctx->completedJobsCount + jobsHeld); + const int jobsHeld = (ctx->currentJobHeld==NULL ? 0 : 1); + return (size_t)(ctx->base.totalIoJobs - (ctx->base.availableJobsCount + ctx->completedJobsCount + jobsHeld)); } /* AIO_ReadPool_getNextCompletedJob: @@ -426,8 +462,7 @@ * Would block. */ static IOJob_t* AIO_ReadPool_getNextCompletedJob(ReadPoolCtx_t* ctx) { IOJob_t *job = NULL; - if (ctx->base.threadPool) - ZSTD_pthread_mutex_lock(&ctx->base.ioJobsMutex); + AIO_IOPool_lockJobsMutex(&ctx->base); job = AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ctx); @@ -443,8 +478,7 @@ ctx->waitingOnOffset += job->usedBufferSize; } - if (ctx->base.threadPool) - ZSTD_pthread_mutex_unlock(&ctx->base.ioJobsMutex); + AIO_IOPool_unlockJobsMutex(&ctx->base); return job; } @@ -480,8 +514,7 @@ static void AIO_ReadPool_enqueueRead(ReadPoolCtx_t* ctx) { } static void AIO_ReadPool_startReading(ReadPoolCtx_t* ctx) { - int i; - for (i = 0; i < ctx->base.availableJobsCount; i++) { + while(ctx->base.availableJobsCount) { AIO_ReadPool_enqueueRead(ctx); } } @@ -517,6 +550,7 @@ ReadPoolCtx_t* AIO_ReadPool_create(const FIO_prefs_t* prefs, size_t bufferSize) AIO_IOPool_init(&ctx->base, prefs, AIO_ReadPool_executeReadJob, bufferSize); ctx->coalesceBuffer = (U8*) malloc(bufferSize * 2); + if(!ctx->coalesceBuffer) EXM_THROW(100, "Allocation error : not enough memory"); ctx->srcBuffer = ctx->coalesceBuffer; ctx->srcBufferLoaded = 0; ctx->completedJobsCount = 0; @@ -524,7 +558,7 @@ if(ctx->base.threadPool) if (ZSTD_pthread_cond_init(&ctx->jobCompletedCond, NULL)) - EXM_THROW(103,"Failed creating write jobCompletedCond mutex"); + EXM_THROW(103,"Failed creating jobCompletedCond"); return ctx; } @@ -620,3 +654,10 @@ int AIO_ReadPool_closeFile(ReadPoolCtx_t* ctx) { AIO_ReadPool_setFile(ctx, NULL); return fclose(file); } + +/* AIO_ReadPool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_ReadPool_setAsync(ReadPoolCtx_t* ctx, int async) { + AIO_IOPool_setThreaded(&ctx->base, async); +} diff --git a/programs/fileio_asyncio.h b/programs/fileio_asyncio.h index 30db44b6ef0..d4980ef5b01 100644 --- a/programs/fileio_asyncio.h +++ b/programs/fileio_asyncio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,13 +8,20 @@ * You may select, at your option, one of the above-listed licenses. */ + /* + * FileIO AsyncIO exposes read/write IO pools that allow doing IO asynchronously. + * The current implementation relies on one thread that reads and one that + * writes. + * Each IO pool supports up to `MAX_IO_JOBS` jobs that can be enqueued for work, + * but they are performed serially by the appropriate worker thread. + * Most systems expose better primitives to perform asynchronous IO, such as + * io_uring on newer Linux systems. The API is built in such a way that in the + * future we could replace the threads with better solutions when available.
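+ *
+ * Minimal usage sketch for the read side (illustrative only; `prefs`,
+ * `srcFile` and `blockSize` stand for caller-provided values) :
+ *   ReadPoolCtx_t* rp = AIO_ReadPool_create(prefs, ZSTD_DStreamInSize());
+ *   AIO_ReadPool_setFile(rp, srcFile);
+ *   size_t const loaded = AIO_ReadPool_fillBuffer(rp, blockSize);
+ *   ... consume rp->srcBuffer, then AIO_ReadPool_consumeBytes(rp, loaded) ...
+ *   AIO_ReadPool_closeFile(rp);
+ *   AIO_ReadPool_free(rp);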
+ */ + #ifndef ZSTD_FILEIO_ASYNCIO_H #define ZSTD_FILEIO_ASYNCIO_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "../lib/common/mem.h" /* U32, U64 */ #include "fileio_types.h" #include "platform.h" @@ -27,6 +34,7 @@ extern "C" { typedef struct { /* These struct fields should be set only on creation and not changed afterwards */ POOL_ctx* threadPool; + int threadPoolActive; int totalIoJobs; const FIO_prefs_t* prefs; POOL_function poolFunction; @@ -136,6 +144,11 @@ WritePoolCtx_t* AIO_WritePool_create(const FIO_prefs_t* prefs, size_t bufferSize * Frees and releases a writePool and its resources. Closes destination file. */ void AIO_WritePool_free(WritePoolCtx_t* ctx); +/* AIO_WritePool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_WritePool_setAsync(WritePoolCtx_t* ctx, int async); + /* AIO_ReadPool_create: * Allocates and sets and a new readPool including its included jobs. * bufferSize should be set to the maximal buffer we want to read at a time, will also be used @@ -146,6 +159,11 @@ ReadPoolCtx_t* AIO_ReadPool_create(const FIO_prefs_t* prefs, size_t bufferSize); * Frees and releases a readPool and its resources. Closes source file. */ void AIO_ReadPool_free(ReadPoolCtx_t* ctx); +/* AIO_ReadPool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_ReadPool_setAsync(ReadPoolCtx_t* ctx, int async); + /* AIO_ReadPool_consumeBytes: * Consumes byes from srcBuffer's beginning and updates srcBufferLoaded accordingly. */ void AIO_ReadPool_consumeBytes(ReadPoolCtx_t *ctx, size_t n); @@ -174,8 +192,4 @@ FILE* AIO_ReadPool_getFile(const ReadPoolCtx_t *ctx); * Closes the current set file. Waits for all current enqueued tasks to complete and resets state. */ int AIO_ReadPool_closeFile(ReadPoolCtx_t *ctx); -#if defined (__cplusplus) -} -#endif - #endif /* ZSTD_FILEIO_ASYNCIO_H */ diff --git a/programs/fileio_common.h b/programs/fileio_common.h index 282c2f13b4d..8aa70edea81 100644 --- a/programs/fileio_common.h +++ b/programs/fileio_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,10 +11,6 @@ #ifndef ZSTD_FILEIO_COMMON_H #define ZSTD_FILEIO_COMMON_H -#if defined (__cplusplus) -extern "C" { -#endif - #include "../lib/common/mem.h" /* U32, U64 */ #include "fileio_types.h" #include "platform.h" @@ -28,29 +24,36 @@ extern "C" { #define GB *(1U<<30) #undef MAX #define MAX(a,b) ((a)>(b) ? (a) : (b)) +#undef MIN /* in case it would be already defined */ +#define MIN(a,b) ((a) < (b) ? (a) : (b)) extern FIO_display_prefs_t g_display_prefs; -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__) +#define DISPLAY_F(f, ...) fprintf((f), __VA_ARGS__) +#define DISPLAYOUT(...) DISPLAY_F(stdout, __VA_ARGS__) +#define DISPLAY(...) DISPLAY_F(stderr, __VA_ARGS__) #define DISPLAYLEVEL(l, ...) 
{ if (g_display_prefs.displayLevel>=l) { DISPLAY(__VA_ARGS__); } } extern UTIL_time_t g_displayClock; #define REFRESH_RATE ((U64)(SEC_TO_MICRO / 6)) -#define READY_FOR_UPDATE() ((g_display_prefs.progressSetting != FIO_ps_never) && UTIL_clockSpanMicro(g_displayClock) > REFRESH_RATE) +#define READY_FOR_UPDATE() (UTIL_clockSpanMicro(g_displayClock) > REFRESH_RATE || g_display_prefs.displayLevel >= 4) #define DELAY_NEXT_UPDATE() { g_displayClock = UTIL_getTime(); } #define DISPLAYUPDATE(l, ...) { \ if (g_display_prefs.displayLevel>=l && (g_display_prefs.progressSetting != FIO_ps_never)) { \ - if (READY_FOR_UPDATE() || (g_display_prefs.displayLevel>=4)) { \ + if (READY_FOR_UPDATE()) { \ DELAY_NEXT_UPDATE(); \ DISPLAY(__VA_ARGS__); \ if (g_display_prefs.displayLevel>=4) fflush(stderr); \ } } } -#undef MIN /* in case it would be already defined */ -#define MIN(a,b) ((a) < (b) ? (a) : (b)) - +#define SHOULD_DISPLAY_SUMMARY() \ + (g_display_prefs.displayLevel >= 2 || g_display_prefs.progressSetting == FIO_ps_always) +#define SHOULD_DISPLAY_PROGRESS() \ + (g_display_prefs.progressSetting != FIO_ps_never && SHOULD_DISPLAY_SUMMARY()) +#define DISPLAY_PROGRESS(...) { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYLEVEL(1, __VA_ARGS__); }} +#define DISPLAYUPDATE_PROGRESS(...) { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYUPDATE(1, __VA_ARGS__); }} +#define DISPLAY_SUMMARY(...) { if (SHOULD_DISPLAY_SUMMARY()) { DISPLAYLEVEL(1, __VA_ARGS__); } } #define EXM_THROW(error, ...) \ { \ @@ -72,12 +75,16 @@ extern UTIL_time_t g_displayClock; /* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW */ -#if defined(_MSC_VER) && _MSC_VER >= 1400 +#if defined(LIBC_NO_FSEEKO) +/* Some older libc implementations don't include these functions (e.g. Bionic < 24) */ +# define LONG_SEEK fseek +# define LONG_TELL ftell +#elif defined(_MSC_VER) && _MSC_VER >= 1400 # define LONG_SEEK _fseeki64 # define LONG_TELL _ftelli64 #elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ -# define LONG_SEEK fseeko -# define LONG_TELL ftello +# define LONG_SEEK fseeko +# define LONG_TELL ftello #elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__) # define LONG_SEEK fseeko64 # define LONG_TELL ftello64 @@ -111,7 +118,4 @@ extern UTIL_time_t g_displayClock; # define LONG_TELL ftell #endif -#if defined (__cplusplus) -} -#endif -#endif //ZSTD_FILEIO_COMMON_H +#endif /* ZSTD_FILEIO_COMMON_H */ diff --git a/programs/fileio_types.h b/programs/fileio_types.h index a1fac2ca7a0..9bbb5154979 100644 --- a/programs/fileio_types.h +++ b/programs/fileio_types.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
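The throttling idea behind the DISPLAYUPDATE / READY_FOR_UPDATE macros above can be shown standalone: repaint the progress line at most ~6 times per second. A minimal sketch, assuming a POSIX target with clock_gettime; the helper names are illustrative only.

```c
#include <stdio.h>
#include <time.h>

#define REFRESH_RATE_US (1000000ULL / 6)   /* ~6 repaints/second, mirroring REFRESH_RATE */

static unsigned long long nowMicro(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);   /* POSIX.1-2001; availability assumed here */
    return (unsigned long long)ts.tv_sec * 1000000ULL
         + (unsigned long long)(ts.tv_nsec / 1000);
}

/* Only repaint when enough time has elapsed since the last update */
static void displayUpdate(unsigned long long* lastRepaint,
                          unsigned long long done, unsigned long long total)
{
    unsigned long long const now = nowMicro();
    if (now - *lastRepaint < REFRESH_RATE_US) return;   /* not ready for update yet */
    *lastRepaint = now;
    fprintf(stderr, "\r%llu / %llu bytes", done, total);
    fflush(stderr);
}
```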
* * This source code is licensed under both the BSD-style license (found in the @@ -37,7 +37,7 @@ typedef struct FIO_prefs_s { int sparseFileSupport; /* 0: no sparse allowed; 1: auto (file yes, stdout no); 2: force sparse */ int dictIDFlag; int checksumFlag; - int blockSize; + int jobSize; int overlapLog; int adaptiveMode; int useRowMatchFinder; @@ -53,7 +53,7 @@ typedef struct FIO_prefs_s { size_t targetCBlockSize; int srcSizeHint; int testMode; - ZSTD_paramSwitch_e literalCompressionMode; + ZSTD_ParamSwitch_e literalCompressionMode; /* IO preferences */ int removeSrcFile; @@ -69,6 +69,18 @@ typedef struct FIO_prefs_s { int contentSize; int allowBlockDevices; int passThrough; + ZSTD_ParamSwitch_e mmapDict; } FIO_prefs_t; +typedef enum {FIO_mallocDict, FIO_mmapDict} FIO_dictBufferType_t; + +typedef struct { + void* dictBuffer; + size_t dictBufferSize; + FIO_dictBufferType_t dictBufferType; +#if defined(_MSC_VER) || defined(_WIN32) + HANDLE dictHandle; +#endif +} FIO_Dict_t; + #endif /* FILEIO_TYPES_HEADER */ diff --git a/programs/lorem.c b/programs/lorem.c new file mode 100644 index 00000000000..79030c92f32 --- /dev/null +++ b/programs/lorem.c @@ -0,0 +1,285 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* Implementation notes: + * + * This is a very simple lorem ipsum generator + * which features a static list of words + * and print them one after another randomly + * with a fake sentence / paragraph structure. + * + * The goal is to generate a printable text + * that can be used to fake a text compression scenario. + * The resulting compression / ratio curve of the lorem ipsum generator + * is more satisfying than the previous statistical generator, + * which was initially designed for entropy compression, + * and lacks a regularity more representative of text. + * + * The compression ratio achievable on the generated lorem ipsum + * is still a bit too good, presumably because the dictionary is a bit too + * small. It would be possible to create some more complex scheme, notably by + * enlarging the dictionary with a word generator, and adding grammatical rules + * (composition) and syntax rules. But that's probably overkill for the intended + * goal. 
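As a rough way to check the "still a bit too good" claim above, one could compress a generated buffer with zstd's one-shot API and print the ratio. A minimal sketch, assuming lorem.h and the public zstd.h API are on the include path; the 1 MiB size and level 3 are arbitrary choices.

```c
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>       /* ZSTD_compress, ZSTD_compressBound, ZSTD_isError */
#include "lorem.h"      /* LOREM_genBuffer */

int main(void)
{
    size_t const srcSize = 1 << 20;                  /* 1 MiB of generated text */
    void* const src = malloc(srcSize);
    size_t const dstCapacity = ZSTD_compressBound(srcSize);
    void* const dst = malloc(dstCapacity);
    if (!src || !dst) return 1;

    LOREM_genBuffer(src, srcSize, 0 /* seed */);
    {   size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
        if (!ZSTD_isError(cSize))
            printf("compression ratio = %.2f\n", (double)srcSize / (double)cSize);
    }
    free(src); free(dst);
    return 0;
}
```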
+ */ + +#include "lorem.h" +#include +#include /* INT_MAX */ +#include /* memcpy */ + +#define WORD_MAX_SIZE 20 + +/* Define the word pool */ +static const char* kWords[] = { + "lorem", "ipsum", "dolor", "sit", "amet", + "consectetur", "adipiscing", "elit", "sed", "do", + "eiusmod", "tempor", "incididunt", "ut", "labore", + "et", "dolore", "magna", "aliqua", "dis", + "lectus", "vestibulum", "mattis", "ullamcorper", "velit", + "commodo", "a", "lacus", "arcu", "magnis", + "parturient", "montes", "nascetur", "ridiculus", "mus", + "mauris", "nulla", "malesuada", "pellentesque", "eget", + "gravida", "in", "dictum", "non", "erat", + "nam", "voluptat", "maecenas", "blandit", "aliquam", + "etiam", "enim", "lobortis", "scelerisque", "fermentum", + "dui", "faucibus", "ornare", "at", "elementum", + "eu", "facilisis", "odio", "morbi", "quis", + "eros", "donec", "ac", "orci", "purus", + "turpis", "cursus", "leo", "vel", "porta", + "consequat", "interdum", "varius", "vulputate", "aliquet", + "pharetra", "nunc", "auctor", "urna", "id", + "metus", "viverra", "nibh", "cras", "mi", + "unde", "omnis", "iste", "natus", "error", + "perspiciatis", "voluptatem", "accusantium", "doloremque", "laudantium", + "totam", "rem", "aperiam", "eaque", "ipsa", + "quae", "ab", "illo", "inventore", "veritatis", + "quasi", "architecto", "beatae", "vitae", "dicta", + "sunt", "explicabo", "nemo", "ipsam", "quia", + "voluptas", "aspernatur", "aut", "odit", "fugit", + "consequuntur", "magni", "dolores", "eos", "qui", + "ratione", "sequi", "nesciunt", "neque", "porro", + "quisquam", "est", "dolorem", "adipisci", "numquam", + "eius", "modi", "tempora", "incidunt", "magnam", + "quaerat", "ad", "minima", "veniam", "nostrum", + "ullam", "corporis", "suscipit", "laboriosam", "nisi", + "aliquid", "ex", "ea", "commodi", "consequatur", + "autem", "eum", "iure", "voluptate", "esse", + "quam", "nihil", "molestiae", "illum", "fugiat", + "quo", "pariatur", "vero", "accusamus", "iusto", + "dignissimos", "ducimus", "blanditiis", "praesentium", "voluptatum", + "deleniti", "atque", "corrupti", "quos", "quas", + "molestias", "excepturi", "sint", "occaecati", "cupiditate", + "provident", "similique", "culpa", "officia", "deserunt", + "mollitia", "animi", "laborum", "dolorum", "fuga", + "harum", "quidem", "rerum", "facilis", "expedita", + "distinctio", "libero", "tempore", "cum", "soluta", + "nobis", "eligendi", "optio", "cumque", "impedit", + "minus", "quod", "maxime", "placeat", "facere", + "possimus", "assumenda", "repellendus", "temporibus", "quibusdam", + "officiis", "debitis", "saepe", "eveniet", "voluptates", + "repudiandae", "recusandae", "itaque", "earum", "hic", + "tenetur", "sapiente", "delectus", "reiciendis", "cillum", + "maiores", "alias", "perferendis", "doloribus", "asperiores", + "repellat", "minim", "nostrud", "exercitation", "ullamco", + "laboris", "aliquip", "duis", "aute", "irure", +}; +static const unsigned kNbWords = sizeof(kWords) / sizeof(kWords[0]); + +/* simple 1-dimension distribution, based on word's length, favors small words + */ +static const int kWeights[] = { 0, 8, 6, 4, 3, 2 }; +static const size_t kNbWeights = sizeof(kWeights) / sizeof(kWeights[0]); + +#define DISTRIB_SIZE_MAX 650 +static int g_distrib[DISTRIB_SIZE_MAX] = { 0 }; +static unsigned g_distribCount = 0; + +static void countFreqs( + const char* words[], + size_t nbWords, + const int* weights, + size_t nbWeights) +{ + unsigned total = 0; + size_t w; + for (w = 0; w < nbWords; w++) { + size_t len = strlen(words[w]); + int lmax; + if (len >= nbWeights) + 
len = nbWeights - 1; + lmax = weights[len]; + total += (unsigned)lmax; + } + g_distribCount = total; + assert(g_distribCount <= DISTRIB_SIZE_MAX); +} + +static void init_word_distrib( + const char* words[], + size_t nbWords, + const int* weights, + size_t nbWeights) +{ + size_t w, d = 0; + countFreqs(words, nbWords, weights, nbWeights); + for (w = 0; w < nbWords; w++) { + size_t len = strlen(words[w]); + int l, lmax; + if (len >= nbWeights) + len = nbWeights - 1; + lmax = weights[len]; + for (l = 0; l < lmax; l++) { + g_distrib[d++] = (int)w; + } + } +} + +/* Note: this unit only works when invoked sequentially. + * No concurrent access is allowed */ +static char* g_ptr = NULL; +static size_t g_nbChars = 0; +static size_t g_maxChars = 10000000; +static unsigned g_randRoot = 0; + +#define RDG_rotl32(x, r) ((x << r) | (x >> (32 - r))) +static unsigned LOREM_rand(unsigned range) +{ + static const unsigned prime1 = 2654435761U; + static const unsigned prime2 = 2246822519U; + unsigned rand32 = g_randRoot; + rand32 *= prime1; + rand32 ^= prime2; + rand32 = RDG_rotl32(rand32, 13); + g_randRoot = rand32; + return (unsigned)(((unsigned long long)rand32 * range) >> 32); +} + +static void writeLastCharacters(void) +{ + size_t lastChars = g_maxChars - g_nbChars; + assert(g_maxChars >= g_nbChars); + if (lastChars == 0) + return; + g_ptr[g_nbChars++] = '.'; + if (lastChars > 2) { + memset(g_ptr + g_nbChars, ' ', lastChars - 2); + } + if (lastChars > 1) { + g_ptr[g_maxChars - 1] = '\n'; + } + g_nbChars = g_maxChars; +} + +static void generateWord(const char* word, const char* separator, int upCase) +{ + size_t const len = strlen(word) + strlen(separator); + if (g_nbChars + len > g_maxChars) { + writeLastCharacters(); + return; + } + memcpy(g_ptr + g_nbChars, word, strlen(word)); + if (upCase) { + static const char toUp = 'A' - 'a'; + g_ptr[g_nbChars] = (char)(g_ptr[g_nbChars] + toUp); + } + g_nbChars += strlen(word); + memcpy(g_ptr + g_nbChars, separator, strlen(separator)); + g_nbChars += strlen(separator); +} + +static int about(unsigned target) +{ + return (int)(LOREM_rand(target) + LOREM_rand(target) + 1); +} + +/* Function to generate a random sentence */ +static void generateSentence(int nbWords) +{ + int commaPos = about(9); + int comma2 = commaPos + about(7); + int qmark = (LOREM_rand(11) == 7); + const char* endSep = qmark ? "? " : ". "; + int i; + for (i = 0; i < nbWords; i++) { + int const wordID = g_distrib[LOREM_rand(g_distribCount)]; + const char* const word = kWords[wordID]; + const char* sep = " "; + if (i == commaPos) + sep = ", "; + if (i == comma2) + sep = ", "; + if (i == nbWords - 1) + sep = endSep; + generateWord(word, sep, i == 0); + } +} + +static void generateParagraph(int nbSentences) +{ + int i; + for (i = 0; i < nbSentences; i++) { + int wordsPerSentence = about(11); + generateSentence(wordsPerSentence); + } + if (g_nbChars < g_maxChars) { + g_ptr[g_nbChars++] = '\n'; + } + if (g_nbChars < g_maxChars) { + g_ptr[g_nbChars++] = '\n'; + } +} + +/* It's "common" for lorem ipsum generators to start with the same first + * pre-defined sentence */ +static void generateFirstSentence(void) +{ + int i; + for (i = 0; i < 18; i++) { + const char* word = kWords[i]; + const char* separator = " "; + if (i == 4) + separator = ", "; + if (i == 7) + separator = ", "; + generateWord(word, separator, i == 0); + } + generateWord(kWords[18], ". 
", 0); +} + +size_t +LOREM_genBlock(void* buffer, size_t size, unsigned seed, int first, int fill) +{ + g_ptr = (char*)buffer; + assert(size < INT_MAX); + g_maxChars = size; + g_nbChars = 0; + g_randRoot = seed; + if (g_distribCount == 0) { + init_word_distrib(kWords, kNbWords, kWeights, kNbWeights); + } + + if (first) { + generateFirstSentence(); + } + while (g_nbChars < g_maxChars) { + int sentencePerParagraph = about(7); + generateParagraph(sentencePerParagraph); + if (!fill) + break; /* only generate one paragraph in not-fill mode */ + } + g_ptr = NULL; + return g_nbChars; +} + +void LOREM_genBuffer(void* buffer, size_t size, unsigned seed) +{ + LOREM_genBlock(buffer, size, seed, 1, 1); +} diff --git a/programs/lorem.h b/programs/lorem.h new file mode 100644 index 00000000000..4a87f8748a5 --- /dev/null +++ b/programs/lorem.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* lorem ipsum generator */ + +#include /* size_t */ + +/* + * LOREM_genBuffer(): + * Generate @size bytes of compressible data using lorem ipsum generator + * into provided @buffer. + */ +void LOREM_genBuffer(void* buffer, size_t size, unsigned seed); + +/* + * LOREM_genBlock(): + * Similar to LOREM_genBuffer, with additional controls : + * - @first : generate the first sentence + * - @fill : fill the entire @buffer, + * if ==0: generate one paragraph at most. + * @return : nb of bytes generated into @buffer. + */ +size_t LOREM_genBlock(void* buffer, size_t size, + unsigned seed, + int first, int fill); diff --git a/programs/platform.h b/programs/platform.h index b858e3b484c..9f1c968f40f 100644 --- a/programs/platform.h +++ b/programs/platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,12 +11,6 @@ #ifndef PLATFORM_H_MODULE #define PLATFORM_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - - - /* ************************************** * Compiler Options ****************************************/ @@ -33,14 +27,15 @@ extern "C" { /* ************************************** * Detect 64-bit OS -* http://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros +* https://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros ****************************************/ #if defined __ia64 || defined _M_IA64 /* Intel Itanium */ \ || defined __powerpc64__ || defined __ppc64__ || defined __PPC64__ /* POWER 64-bit */ \ || (defined __sparc && (defined __sparcv9 || defined __sparc_v9__ || defined __arch64__)) || defined __sparc64__ /* SPARC 64-bit */ \ - || defined __x86_64__s || defined _M_X64 /* x86 64-bit */ \ + || defined __x86_64__ || defined _M_X64 /* x86 64-bit */ \ || defined __arm64__ || defined __aarch64__ || defined __ARM64_ARCH_8__ /* ARM 64-bit */ \ || (defined __mips && (__mips == 64 || __mips == 4 || __mips == 3)) /* MIPS 64-bit */ \ + || (defined(__riscv) && __riscv_xlen == 64) /* RISC-V 64-bit */ \ || defined _LP64 || defined __LP64__ /* NetBSD, OpenBSD */ || defined __64BIT__ /* AIX */ || defined _ADDR64 /* Cray */ \ || (defined __SIZEOF_POINTER__ && __SIZEOF_POINTER__ == 8) /* gcc */ # if !defined(__64BIT__) @@ -74,13 +69,12 @@ extern "C" { ***************************************************************/ #ifndef PLATFORM_POSIX_VERSION -# if (defined(__APPLE__) && defined(__MACH__)) || defined(__SVR4) || defined(_AIX) || defined(__hpux) /* POSIX.1-2001 (SUSv3) conformant */ \ - || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) /* BSD distros */ +# if (defined(__APPLE__) && defined(__MACH__)) || defined(__SVR4) || defined(_AIX) || defined(__hpux) /* POSIX.1-2001 (SUSv3) conformant */ /* exception rule : force posix version to 200112L, * note: it's better to use unistd.h's _POSIX_VERSION whenever possible */ # define PLATFORM_POSIX_VERSION 200112L -/* try to determine posix version through official unistd.h's _POSIX_VERSION (http://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). +/* try to determine posix version through official unistd.h's _POSIX_VERSION (https://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). * note : there is no simple way to know in advance if is present or not on target system, * Posix specification mandates its presence and its content, but target system must respect this spec. * It's necessary to _not_ #include whenever target OS is not unix-like @@ -89,7 +83,7 @@ extern "C" { */ # elif !defined(_WIN32) \ && ( defined(__unix__) || defined(__unix) \ - || defined(__midipix__) || defined(__VMS) || defined(__HAIKU__) ) + || defined(_QNX_SOURCE) || defined(__midipix__) || defined(__VMS) || defined(__HAIKU__) ) # if defined(__linux__) || defined(__linux) || defined(__CYGWIN__) # ifndef _POSIX_C_SOURCE @@ -127,6 +121,10 @@ extern "C" { /*-********************************************* * Detect if isatty() and fileno() are available +* +* Note: Use UTIL_isConsole() for the zstd CLI +* instead, as it allows faking is console for +* testing. 
************************************************/ #if (defined(__linux__) && (PLATFORM_POSIX_VERSION > 1)) \ || (PLATFORM_POSIX_VERSION >= 200112L) \ @@ -137,14 +135,24 @@ extern "C" { #elif defined(MSDOS) || defined(OS2) # include /* _isatty */ # define IS_CONSOLE(stdStream) _isatty(_fileno(stdStream)) -#elif defined(WIN32) || defined(_WIN32) +#elif defined(_WIN32) # include /* _isatty */ # include /* DeviceIoControl, HANDLE, FSCTL_SET_SPARSE */ # include /* FILE */ + +#if defined (__cplusplus) +extern "C" { +#endif + static __inline int IS_CONSOLE(FILE* stdStream) { DWORD dummy; return _isatty(_fileno(stdStream)) && GetConsoleMode((HANDLE)_get_osfhandle(_fileno(stdStream)), &dummy); } + +#if defined (__cplusplus) +} +#endif + #else # define IS_CONSOLE(stdStream) 0 #endif @@ -153,7 +161,7 @@ static __inline int IS_CONSOLE(FILE* stdStream) { /****************************** * OS-specific IO behaviors ******************************/ -#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(_WIN32) +#if defined(MSDOS) || defined(OS2) || defined(_WIN32) # include /* _O_BINARY */ # include /* _setmode, _fileno, _get_osfhandle */ # if !defined(__DJGPP__) @@ -192,13 +200,13 @@ static __inline int IS_CONSOLE(FILE* stdStream) { #ifndef ZSTD_SETPRIORITY_SUPPORT - /* mandates presence of and support for setpriority() : http://man7.org/linux/man-pages/man2/setpriority.2.html */ + /* mandates presence of and support for setpriority() : https://man7.org/linux/man-pages/man2/setpriority.2.html */ # define ZSTD_SETPRIORITY_SUPPORT (PLATFORM_POSIX_VERSION >= 200112L) #endif #ifndef ZSTD_NANOSLEEP_SUPPORT - /* mandates support of nanosleep() within : http://man7.org/linux/man-pages/man2/nanosleep.2.html */ + /* mandates support of nanosleep() within : https://man7.org/linux/man-pages/man2/nanosleep.2.html */ # if (defined(__linux__) && (PLATFORM_POSIX_VERSION >= 199309L)) \ || (PLATFORM_POSIX_VERSION >= 200112L) # define ZSTD_NANOSLEEP_SUPPORT 1 @@ -207,9 +215,4 @@ static __inline int IS_CONSOLE(FILE* stdStream) { # endif #endif - -#if defined (__cplusplus) -} -#endif - #endif /* PLATFORM_H_MODULE */ diff --git a/programs/timefn.c b/programs/timefn.c index 64577b0e932..d7f330974d3 100644 --- a/programs/timefn.c +++ b/programs/timefn.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
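The console detection above feeds the CLI rule that compressed data is never written to a terminal. A simplified sketch of that guard, assuming a POSIX isatty() rather than the full IS_CONSOLE macro chain; the function names are illustrative only.

```c
#include <stdio.h>
#include <unistd.h>   /* isatty, fileno : POSIX; assumption for this sketch */

static int isConsole(FILE* f) { return isatty(fileno(f)); }

/* Returns nonzero (error) if binary output would land on a terminal */
static int checkStdoutForBinaryOutput(void)
{
    if (isConsole(stdout)) {
        fprintf(stderr,
            "refusing to write compressed data to a terminal; redirect output or force\n");
        return 1;
    }
    return 0;
}
```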
* * This source code is licensed under both the BSD-style license (found in the @@ -12,7 +12,8 @@ /* === Dependencies === */ #include "timefn.h" - +#include "platform.h" /* set _POSIX_C_SOURCE */ +#include /* CLOCK_MONOTONIC, TIME_UTC */ /*-**************************************** * Time functions @@ -20,46 +21,37 @@ #if defined(_WIN32) /* Windows */ +#include /* LARGE_INTEGER */ #include /* abort */ #include /* perror */ -UTIL_time_t UTIL_getTime(void) { UTIL_time_t x; QueryPerformanceCounter(&x); return x; } - -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) +UTIL_time_t UTIL_getTime(void) { static LARGE_INTEGER ticksPerSecond; + static double nsFactor = 1.0; static int init = 0; if (!init) { if (!QueryPerformanceFrequency(&ticksPerSecond)) { perror("timefn::QueryPerformanceFrequency"); abort(); } + nsFactor = 1000000000.0 / (double)ticksPerSecond.QuadPart; init = 1; } - return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; -} - -PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) -{ - static LARGE_INTEGER ticksPerSecond; - static int init = 0; - if (!init) { - if (!QueryPerformanceFrequency(&ticksPerSecond)) { - perror("timefn::QueryPerformanceFrequency"); - abort(); - } - init = 1; + { UTIL_time_t r; + LARGE_INTEGER x; + QueryPerformanceCounter(&x); + r.t = (PTime)((double)x.QuadPart * nsFactor); + return r; } - return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; } - #elif defined(__APPLE__) && defined(__MACH__) -UTIL_time_t UTIL_getTime(void) { return mach_absolute_time(); } +#include /* mach_timebase_info_data_t, mach_timebase_info, mach_absolute_time */ -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) +UTIL_time_t UTIL_getTime(void) { static mach_timebase_info_data_t rate; static int init = 0; @@ -67,23 +59,39 @@ PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) mach_timebase_info(&rate); init = 1; } - return (((clockEnd - clockStart) * (PTime)rate.numer) / ((PTime)rate.denom))/1000ULL; + { UTIL_time_t r; + r.t = mach_absolute_time() * (PTime)rate.numer / (PTime)rate.denom; + return r; + } } -PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) +/* POSIX.1-2001 (optional) */ +#elif defined(CLOCK_MONOTONIC) + +#include /* abort */ +#include /* perror */ + +UTIL_time_t UTIL_getTime(void) { - static mach_timebase_info_data_t rate; - static int init = 0; - if (!init) { - mach_timebase_info(&rate); - init = 1; + /* time must be initialized, othersize it may fail msan test. + * No good reason, likely a limitation of timespec_get() for some target */ + struct timespec time = { 0, 0 }; + if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) { + perror("timefn::clock_gettime(CLOCK_MONOTONIC)"); + abort(); + } + { UTIL_time_t r; + r.t = (PTime)time.tv_sec * 1000000000ULL + (PTime)time.tv_nsec; + return r; } - return ((clockEnd - clockStart) * (PTime)rate.numer) / ((PTime)rate.denom); } -/* C11 requires timespec_get, but FreeBSD 11 lacks it, while still claiming C11 compliance. - Android also lacks it but does define TIME_UTC. */ +/* C11 requires support of timespec_get(). + * However, FreeBSD 11 claims C11 compliance while lacking timespec_get(). + * Double confirm timespec_get() support by checking the definition of TIME_UTC. + * However, some versions of Android manage to simultaneously define TIME_UTC + * and lack timespec_get() support... 
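The selection order above (QueryPerformanceCounter, then mach_absolute_time, then clock_gettime(CLOCK_MONOTONIC), then timespec_get, then plain clock()) can be condensed into a sketch like the following; the Windows and Apple branches are omitted, the names are illustrative, and the #if tests probe exactly the macro availability the real code relies on.

```c
#include <time.h>

typedef unsigned long long PTimeNs;

/* Condensed sketch of the same fallback chain; error handling omitted. */
static PTimeNs readMonotonicNs(void)
{
#if defined(CLOCK_MONOTONIC)                       /* optional POSIX.1-2001 clock */
    struct timespec ts = { 0, 0 };
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (PTimeNs)ts.tv_sec * 1000000000ULL + (PTimeNs)ts.tv_nsec;
#elif defined(TIME_UTC)                            /* C11 timespec_get */
    struct timespec ts = { 0, 0 };
    timespec_get(&ts, TIME_UTC);
    return (PTimeNs)ts.tv_sec * 1000000000ULL + (PTimeNs)ts.tv_nsec;
#else                                              /* C90 fallback; not MT-safe */
    return (PTimeNs)clock() * 1000000000ULL / CLOCKS_PER_SEC;
#endif
}
```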
*/ #elif (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */) \ && defined(TIME_UTC) && !defined(__ANDROID__) @@ -94,65 +102,49 @@ UTIL_time_t UTIL_getTime(void) { /* time must be initialized, othersize it may fail msan test. * No good reason, likely a limitation of timespec_get() for some target */ - UTIL_time_t time = UTIL_TIME_INITIALIZER; + struct timespec time = { 0, 0 }; if (timespec_get(&time, TIME_UTC) != TIME_UTC) { - perror("timefn::timespec_get"); + perror("timefn::timespec_get(TIME_UTC)"); abort(); } - return time; -} - -static UTIL_time_t UTIL_getSpanTime(UTIL_time_t begin, UTIL_time_t end) -{ - UTIL_time_t diff; - if (end.tv_nsec < begin.tv_nsec) { - diff.tv_sec = (end.tv_sec - 1) - begin.tv_sec; - diff.tv_nsec = (end.tv_nsec + 1000000000ULL) - begin.tv_nsec; - } else { - diff.tv_sec = end.tv_sec - begin.tv_sec; - diff.tv_nsec = end.tv_nsec - begin.tv_nsec; + { UTIL_time_t r; + r.t = (PTime)time.tv_sec * 1000000000ULL + (PTime)time.tv_nsec; + return r; } - return diff; } -PTime UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end) -{ - UTIL_time_t const diff = UTIL_getSpanTime(begin, end); - PTime micro = 0; - micro += 1000000ULL * diff.tv_sec; - micro += diff.tv_nsec / 1000ULL; - return micro; -} -PTime UTIL_getSpanTimeNano(UTIL_time_t begin, UTIL_time_t end) +#else /* relies on standard C90 (note : clock_t produces wrong measurements for multi-threaded workloads) */ + +UTIL_time_t UTIL_getTime(void) { - UTIL_time_t const diff = UTIL_getSpanTime(begin, end); - PTime nano = 0; - nano += 1000000000ULL * diff.tv_sec; - nano += diff.tv_nsec; - return nano; + UTIL_time_t r; + r.t = (PTime)clock() * 1000000000ULL / CLOCKS_PER_SEC; + return r; } - - -#else /* relies on standard C90 (note : clock_t measurements can be wrong when using multi-threading) */ - -UTIL_time_t UTIL_getTime(void) { return clock(); } -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; } -PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; } +#define TIME_MT_MEASUREMENTS_NOT_SUPPORTED #endif +/* ==== Common functions, valid for all time API ==== */ +PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) +{ + return clockEnd.t - clockStart.t; +} + +PTime UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end) +{ + return UTIL_getSpanTimeNano(begin, end) / 1000ULL; +} -/* returns time span in microseconds */ PTime UTIL_clockSpanMicro(UTIL_time_t clockStart ) { UTIL_time_t const clockEnd = UTIL_getTime(); return UTIL_getSpanTimeMicro(clockStart, clockEnd); } -/* returns time span in microseconds */ PTime UTIL_clockSpanNano(UTIL_time_t clockStart ) { UTIL_time_t const clockEnd = UTIL_getTime(); @@ -167,3 +159,12 @@ void UTIL_waitForNextTick(void) clockEnd = UTIL_getTime(); } while (UTIL_getSpanTimeNano(clockStart, clockEnd) == 0); } + +int UTIL_support_MT_measurements(void) +{ +# if defined(TIME_MT_MEASUREMENTS_NOT_SUPPORTED) + return 0; +# else + return 1; +# endif +} diff --git a/programs/timefn.h b/programs/timefn.h index 8ba8ed787bc..80f72e228a3 100644 --- a/programs/timefn.h +++ b/programs/timefn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
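With every backend normalized to a single nanosecond counter, span math becomes plain subtraction, as the common functions above show. A usage sketch against the timefn API declared in this patch:

```c
#include <stdio.h>
#include "timefn.h"   /* UTIL_getTime, UTIL_clockSpanNano, UTIL_waitForNextTick */

/* Time one invocation of fn(), waiting for a fresh tick first so that
 * coarse timers don't truncate the measurement. */
static void benchOnce(void (*fn)(void))
{
    UTIL_waitForNextTick();
    {   UTIL_time_t const start = UTIL_getTime();
        fn();
        printf("elapsed: %llu ns\n",
               (unsigned long long)UTIL_clockSpanNano(start));
    }
}
```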
* * This source code is licensed under both the BSD-style license (found in the @@ -11,80 +11,49 @@ #ifndef TIME_FN_H_MODULE_287987 #define TIME_FN_H_MODULE_287987 -#if defined (__cplusplus) -extern "C" { -#endif - - -/*-**************************************** -* Dependencies -******************************************/ -#include /* clock_t, clock, CLOCKS_PER_SEC */ - - - /*-**************************************** -* Local Types +* Types ******************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # if defined(_AIX) # include # else -# include /* intptr_t */ +# include /* uint64_t */ # endif typedef uint64_t PTime; /* Precise Time */ #else typedef unsigned long long PTime; /* does not support compilers without long long support */ #endif +/* UTIL_time_t contains a nanosecond time counter. + * The absolute value is not meaningful. + * It's only valid to compute the difference between 2 measurements. */ +typedef struct { PTime t; } UTIL_time_t; +#define UTIL_TIME_INITIALIZER { 0 } /*-**************************************** * Time functions ******************************************/ -#if defined(_WIN32) /* Windows */ - - #include /* LARGE_INTEGER */ - typedef LARGE_INTEGER UTIL_time_t; - #define UTIL_TIME_INITIALIZER { { 0, 0 } } - -#elif defined(__APPLE__) && defined(__MACH__) - - #include - typedef PTime UTIL_time_t; - #define UTIL_TIME_INITIALIZER 0 - -/* C11 requires timespec_get, but FreeBSD 11 lacks it, while still claiming C11 compliance. - Android also lacks it but does define TIME_UTC. */ -#elif (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */) \ - && defined(TIME_UTC) && !defined(__ANDROID__) - - typedef struct timespec UTIL_time_t; - #define UTIL_TIME_INITIALIZER { 0, 0 } - -#else /* relies on standard C90 (note : clock_t measurements can be wrong when using multi-threading) */ - #define UTIL_TIME_USES_C90_CLOCK - typedef clock_t UTIL_time_t; - #define UTIL_TIME_INITIALIZER 0 - -#endif +UTIL_time_t UTIL_getTime(void); +/* Timer resolution can be low on some platforms. + * To improve accuracy, it's recommended to wait for a new tick + * before starting benchmark measurements */ +void UTIL_waitForNextTick(void); +/* tells if timefn will return correct time measurements + * in presence of multi-threaded workload. + * note : this is not the case if only C90 clock_t measurements are available */ +int UTIL_support_MT_measurements(void); -UTIL_time_t UTIL_getTime(void); -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd); PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd); - -#define SEC_TO_MICRO ((PTime)1000000) -PTime UTIL_clockSpanMicro(UTIL_time_t clockStart); PTime UTIL_clockSpanNano(UTIL_time_t clockStart); -void UTIL_waitForNextTick(void); - +PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd); +PTime UTIL_clockSpanMicro(UTIL_time_t clockStart); -#if defined (__cplusplus) -} -#endif +#define SEC_TO_MICRO ((PTime)1000000) /* nb of microseconds in a second */ #endif /* TIME_FN_H_MODULE_287987 */ diff --git a/programs/util.c b/programs/util.c index a3af2621143..652530b1223 100644 --- a/programs/util.c +++ b/programs/util.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
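The C90 clock() fallback accumulates CPU time across all threads, so throughput numbers computed from it would be wrong for multi-threaded runs; that is what UTIL_support_MT_measurements() reports. A sketch of guarding on it (the reporting helper is invented for the example):

```c
#include <stdio.h>
#include "timefn.h"   /* UTIL_support_MT_measurements */

static void reportMtBenchmark(unsigned long long bytes, unsigned long long nanos)
{
    if (!UTIL_support_MT_measurements()) {
        fprintf(stderr, "note: clock()-based timer; multi-threaded speed unreliable\n");
        return;
    }
    /* bytes/ns * 1e9 / 1e6 == bytes * 1000 / ns, in MB/s */
    fprintf(stderr, "%.1f MB/s\n", (double)bytes * 1000.0 / (double)nanos);
}
```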
* * This source code is licensed under both the BSD-style license (found in the @@ -8,11 +8,6 @@ * You may select, at your option, one of the above-listed licenses. */ -#if defined (__cplusplus) -extern "C" { -#endif - - /*-**************************************** * Dependencies ******************************************/ @@ -23,16 +18,27 @@ extern "C" { #include #include +#if defined(__FreeBSD__) +#include /* __FreeBSD_version */ +#endif /* #ifdef __FreeBSD__ */ + #if defined(_WIN32) # include /* utime */ # include /* _chmod */ +# define ZSTD_USE_UTIMENSAT 0 #else # include /* chown, stat */ -# if PLATFORM_POSIX_VERSION < 200809L || !defined(st_mtime) -# include /* utime */ +# include /* utimensat, st_mtime */ +# if (PLATFORM_POSIX_VERSION >= 200809L && defined(st_mtime)) \ + || (defined(__FreeBSD__) && __FreeBSD_version >= 1100056) +# define ZSTD_USE_UTIMENSAT 1 # else +# define ZSTD_USE_UTIMENSAT 0 +# endif +# if ZSTD_USE_UTIMENSAT # include /* AT_FDCWD */ -# include /* utimensat */ +# else +# include /* utime */ # endif #endif @@ -66,6 +72,27 @@ extern "C" { #define UTIL_DISPLAY(...) fprintf(stderr, __VA_ARGS__) #define UTIL_DISPLAYLEVEL(l, ...) { if (g_utilDisplayLevel>=l) { UTIL_DISPLAY(__VA_ARGS__); } } +static int g_traceDepth = 0; +int g_traceFileStat = 0; + +#define UTIL_TRACE_CALL(...) \ + { \ + if (g_traceFileStat) { \ + UTIL_DISPLAY("Trace:FileStat: %*s> ", g_traceDepth, ""); \ + UTIL_DISPLAY(__VA_ARGS__); \ + UTIL_DISPLAY("\n"); \ + ++g_traceDepth; \ + } \ + } + +#define UTIL_TRACE_RET(ret) \ + { \ + if (g_traceFileStat) { \ + --g_traceDepth; \ + UTIL_DISPLAY("Trace:FileStat: %*s< %d\n", g_traceDepth, "", (ret)); \ + } \ + } + /* A modified version of realloc(). * If UTIL_realloc() fails the original block is freed. */ @@ -81,6 +108,17 @@ UTIL_STATIC void* UTIL_realloc(void *ptr, size_t size) #define chmod _chmod #endif +#ifndef ZSTD_HAVE_FCHMOD +#if PLATFORM_POSIX_VERSION >= 199309L +#define ZSTD_HAVE_FCHMOD +#endif +#endif + +#ifndef ZSTD_HAVE_FCHOWN +#if PLATFORM_POSIX_VERSION >= 200809L +#define ZSTD_HAVE_FCHOWN +#endif +#endif /*-**************************************** * Console log @@ -100,7 +138,7 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, ch = getchar(); result = 0; if (strchr(acceptableLetters, ch) == NULL) { - UTIL_DISPLAY("%s", abortMsg); + UTIL_DISPLAY("%s \n", abortMsg); result = 1; } /* flush the rest */ @@ -113,7 +151,8 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, /*-************************************* * Constants ***************************************/ -#define LIST_SIZE_INCREASE (8*1024) +#define KB * (1 << 10) +#define LIST_SIZE_INCREASE (8 KB) #define MAX_FILE_OF_FILE_NAMES_SIZE (1<<20)*50 @@ -121,21 +160,61 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, * Functions ***************************************/ -int UTIL_stat(const char* filename, stat_t* statbuf) +void UTIL_traceFileStat(void) { + g_traceFileStat = 1; +} + +int UTIL_fstat(const int fd, const char* filename, stat_t* statbuf) +{ + int ret; + UTIL_TRACE_CALL("UTIL_stat(%d, %s)", fd, filename); #if defined(_MSC_VER) - return !_stat64(filename, statbuf); + if (fd >= 0) { + ret = !_fstat64(fd, statbuf); + } else { + ret = !_stat64(filename, statbuf); + } #elif defined(__MINGW32__) && defined (__MSVCRT__) - return !_stati64(filename, statbuf); + if (fd >= 0) { + ret = !_fstati64(fd, statbuf); + } else { + ret = !_stati64(filename, statbuf); + } #else - return !stat(filename, statbuf); + if (fd >= 0) 
{ + ret = !fstat(fd, statbuf); + } else { + ret = !stat(filename, statbuf); + } #endif + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_stat(const char* filename, stat_t* statbuf) +{ + return UTIL_fstat(-1, filename, statbuf); +} + +int UTIL_isFdRegularFile(int fd) +{ + stat_t statbuf; + int ret; + UTIL_TRACE_CALL("UTIL_isFdRegularFile(%d)", fd); + ret = fd >= 0 && UTIL_fstat(fd, "", &statbuf) && UTIL_isRegularFileStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isRegularFile(const char* infilename) { stat_t statbuf; - return UTIL_stat(infilename, &statbuf) && UTIL_isRegularFileStat(&statbuf); + int ret; + UTIL_TRACE_CALL("UTIL_isRegularFile(%s)", infilename); + ret = UTIL_stat(infilename, &statbuf) && UTIL_isRegularFileStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isRegularFileStat(const stat_t* statbuf) @@ -149,73 +228,153 @@ int UTIL_isRegularFileStat(const stat_t* statbuf) /* like chmod, but avoid changing permission of /dev/null */ int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions) +{ + return UTIL_fchmod(-1, filename, statbuf, permissions); +} + +int UTIL_fchmod(const int fd, char const* filename, const stat_t* statbuf, mode_t permissions) { stat_t localStatBuf; + UTIL_TRACE_CALL("UTIL_chmod(%s, %#4o)", filename, (unsigned)permissions); if (statbuf == NULL) { - if (!UTIL_stat(filename, &localStatBuf)) return 0; + if (!UTIL_fstat(fd, filename, &localStatBuf)) { + UTIL_TRACE_RET(0); + return 0; + } statbuf = &localStatBuf; } - if (!UTIL_isRegularFileStat(statbuf)) return 0; /* pretend success, but don't change anything */ - return chmod(filename, permissions); + if (!UTIL_isRegularFileStat(statbuf)) { + UTIL_TRACE_RET(0); + return 0; /* pretend success, but don't change anything */ + } +#ifdef ZSTD_HAVE_FCHMOD + if (fd >= 0) { + int ret; + UTIL_TRACE_CALL("fchmod"); + ret = fchmod(fd, permissions); + UTIL_TRACE_RET(ret); + UTIL_TRACE_RET(ret); + return ret; + } else +#endif + { + int ret; + UTIL_TRACE_CALL("chmod"); + ret = chmod(filename, permissions); + UTIL_TRACE_RET(ret); + UTIL_TRACE_RET(ret); + return ret; + } } /* set access and modification times */ int UTIL_utime(const char* filename, const stat_t *statbuf) { int ret; + UTIL_TRACE_CALL("UTIL_utime(%s)", filename); /* We check that st_mtime is a macro here in order to give us confidence * that struct stat has a struct timespec st_mtim member. We need this * check because there are some platforms that claim to be POSIX 2008 * compliant but which do not have st_mtim... */ -#if (PLATFORM_POSIX_VERSION >= 200809L) && defined(st_mtime) - /* (atime, mtime) */ - struct timespec timebuf[2] = { {0, UTIME_NOW} }; - timebuf[1] = statbuf->st_mtim; - ret = utimensat(AT_FDCWD, filename, timebuf, 0); + /* FreeBSD has implemented POSIX 2008 for a long time but still only + * advertises support for POSIX 2001. They have a version macro that + * lets us safely gate them in. + * See https://docs.freebsd.org/en/books/porters-handbook/versions/. 
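Preferring the open descriptor avoids a second path lookup (and races against concurrent renames), which is why the fd variants above exist. A minimal sketch of the same dispatch pattern, assuming POSIX fstat()/stat(); the helper name is illustrative.

```c
#include <sys/stat.h>   /* fstat, stat : POSIX */

/* Same dispatch idea as UTIL_fstat(): use the descriptor when we have one,
 * fall back to the path otherwise. Returns 1 on success, 0 on failure. */
static int statByFdOrName(int fd, const char* filename, struct stat* st)
{
    if (fd >= 0)
        return fstat(fd, st) == 0;
    return stat(filename, st) == 0;
}
```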
+ */ +#if ZSTD_USE_UTIMENSAT + { + /* (atime, mtime) */ + struct timespec timebuf[2] = { {0, UTIME_NOW} }; + timebuf[1] = statbuf->st_mtim; + ret = utimensat(AT_FDCWD, filename, timebuf, 0); + } #else - struct utimbuf timebuf; - timebuf.actime = time(NULL); - timebuf.modtime = statbuf->st_mtime; - ret = utime(filename, &timebuf); + { + struct utimbuf timebuf; + timebuf.actime = time(NULL); + timebuf.modtime = statbuf->st_mtime; + ret = utime(filename, &timebuf); + } #endif errno = 0; + UTIL_TRACE_RET(ret); return ret; } int UTIL_setFileStat(const char *filename, const stat_t *statbuf) { - int res = 0; + return UTIL_setFDStat(-1, filename, statbuf); +} +int UTIL_setFDStat(const int fd, const char *filename, const stat_t *statbuf) +{ + int res = 0; stat_t curStatBuf; - if (!UTIL_stat(filename, &curStatBuf) || !UTIL_isRegularFileStat(&curStatBuf)) + UTIL_TRACE_CALL("UTIL_setFileStat(%d, %s)", fd, filename); + + if (!UTIL_fstat(fd, filename, &curStatBuf) || !UTIL_isRegularFileStat(&curStatBuf)) { + UTIL_TRACE_RET(-1); return -1; + } - /* set access and modification times */ - res += UTIL_utime(filename, statbuf); + /* Mimic gzip's behavior: + * + * "Change the group first, then the permissions, then the owner. + * That way, the permissions will be correct on systems that allow + * users to give away files, without introducing a security hole. + * Security depends on permissions not containing the setuid or + * setgid bits." */ #if !defined(_WIN32) - res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */ +#ifdef ZSTD_HAVE_FCHOWN + if (fd >= 0) { + res += fchown(fd, -1, statbuf->st_gid); /* Apply group ownership */ + } else +#endif + { + res += chown(filename, -1, statbuf->st_gid); /* Apply group ownership */ + } #endif - res += UTIL_chmod(filename, &curStatBuf, statbuf->st_mode & 07777); /* Copy file permissions */ + res += UTIL_fchmod(fd, filename, &curStatBuf, statbuf->st_mode & 0777); /* Copy file permissions */ + +#if !defined(_WIN32) +#ifdef ZSTD_HAVE_FCHOWN + if (fd >= 0) { + res += fchown(fd, statbuf->st_uid, -1); /* Apply user ownership */ + } else +#endif + { + res += chown(filename, statbuf->st_uid, -1); /* Apply user ownership */ + } +#endif errno = 0; + UTIL_TRACE_RET(-res); return -res; /* number of errors is returned */ } int UTIL_isDirectory(const char* infilename) { stat_t statbuf; - return UTIL_stat(infilename, &statbuf) && UTIL_isDirectoryStat(&statbuf); + int ret; + UTIL_TRACE_CALL("UTIL_isDirectory(%s)", infilename); + ret = UTIL_stat(infilename, &statbuf) && UTIL_isDirectoryStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isDirectoryStat(const stat_t* statbuf) { + int ret; + UTIL_TRACE_CALL("UTIL_isDirectoryStat()"); #if defined(_MSC_VER) - return (statbuf->st_mode & _S_IFDIR) != 0; + ret = (statbuf->st_mode & _S_IFDIR) != 0; #else - return S_ISDIR(statbuf->st_mode) != 0; + ret = S_ISDIR(statbuf->st_mode) != 0; #endif + UTIL_TRACE_RET(ret); + return ret; } int UTIL_compareStr(const void *p1, const void *p2) { @@ -224,33 +383,68 @@ int UTIL_compareStr(const void *p1, const void *p2) { int UTIL_isSameFile(const char* fName1, const char* fName2) { + int ret; assert(fName1 != NULL); assert(fName2 != NULL); + UTIL_TRACE_CALL("UTIL_isSameFile(%s, %s)", fName1, fName2); #if defined(_MSC_VER) || defined(_WIN32) /* note : Visual does not support file identification by inode. * inode does not work on Windows, even with a posix layer, like msys2. 
* The following work-around is limited to detecting exact name repetition only, * aka `filename` is considered different from `subdir/../filename` */ - return !strcmp(fName1, fName2); + ret = !strcmp(fName1, fName2); #else { stat_t file1Stat; stat_t file2Stat; - return UTIL_stat(fName1, &file1Stat) + ret = UTIL_stat(fName1, &file1Stat) && UTIL_stat(fName2, &file2Stat) - && (file1Stat.st_dev == file2Stat.st_dev) - && (file1Stat.st_ino == file2Stat.st_ino); + && UTIL_isSameFileStat(fName1, fName2, &file1Stat, &file2Stat); } #endif + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_isSameFileStat( + const char* fName1, const char* fName2, + const stat_t* file1Stat, const stat_t* file2Stat) +{ + int ret; + assert(fName1 != NULL); assert(fName2 != NULL); + UTIL_TRACE_CALL("UTIL_isSameFileStat(%s, %s)", fName1, fName2); +#if defined(_MSC_VER) || defined(_WIN32) + /* note : Visual does not support file identification by inode. + * inode does not work on Windows, even with a posix layer, like msys2. + * The following work-around is limited to detecting exact name repetition only, + * aka `filename` is considered different from `subdir/../filename` */ + (void)file1Stat; + (void)file2Stat; + ret = !strcmp(fName1, fName2); +#else + { + ret = (file1Stat->st_dev == file2Stat->st_dev) + && (file1Stat->st_ino == file2Stat->st_ino); + } +#endif + UTIL_TRACE_RET(ret); + return ret; } /* UTIL_isFIFO : distinguish named pipes */ int UTIL_isFIFO(const char* infilename) { + UTIL_TRACE_CALL("UTIL_isFIFO(%s)", infilename); /* macro guards, as defined in : https://linux.die.net/man/2/lstat */ #if PLATFORM_POSIX_VERSION >= 200112L - stat_t statbuf; - if (UTIL_stat(infilename, &statbuf) && UTIL_isFIFOStat(&statbuf)) return 1; + { + stat_t statbuf; + if (UTIL_stat(infilename, &statbuf) && UTIL_isFIFOStat(&statbuf)) { + UTIL_TRACE_RET(1); + return 1; + } + } #endif (void)infilename; + UTIL_TRACE_RET(0); return 0; } @@ -265,6 +459,26 @@ int UTIL_isFIFOStat(const stat_t* statbuf) return 0; } +/* process substitution */ +int UTIL_isFileDescriptorPipe(const char* filename) +{ + UTIL_TRACE_CALL("UTIL_isFileDescriptorPipe(%s)", filename); + /* Check if the filename is a /dev/fd/ path which indicates a file descriptor */ + if (filename[0] == '/' && strncmp(filename, "/dev/fd/", 8) == 0) { + UTIL_TRACE_RET(1); + return 1; + } + + /* Check for alternative process substitution formats on different systems */ + if (filename[0] == '/' && strncmp(filename, "/proc/self/fd/", 14) == 0) { + UTIL_TRACE_RET(1); + return 1; + } + + UTIL_TRACE_RET(0); + return 0; /* Not recognized as a file descriptor pipe */ +} + /* UTIL_isBlockDevStat : distinguish named pipes */ int UTIL_isBlockDevStat(const stat_t* statbuf) { @@ -278,21 +492,69 @@ int UTIL_isBlockDevStat(const stat_t* statbuf) int UTIL_isLink(const char* infilename) { + UTIL_TRACE_CALL("UTIL_isLink(%s)", infilename); /* macro guards, as defined in : https://linux.die.net/man/2/lstat */ #if PLATFORM_POSIX_VERSION >= 200112L - stat_t statbuf; - int const r = lstat(infilename, &statbuf); - if (!r && S_ISLNK(statbuf.st_mode)) return 1; + { + stat_t statbuf; + int const r = lstat(infilename, &statbuf); + if (!r && S_ISLNK(statbuf.st_mode)) { + UTIL_TRACE_RET(1); + return 1; + } + } #endif (void)infilename; + UTIL_TRACE_RET(0); return 0; } +static int g_fakeStdinIsConsole = 0; +static int g_fakeStderrIsConsole = 0; +static int g_fakeStdoutIsConsole = 0; + +int UTIL_isConsole(FILE* file) +{ + int ret; + UTIL_TRACE_CALL("UTIL_isConsole(%d)", fileno(file)); + if (file == stdin && 
g_fakeStdinIsConsole) + ret = 1; + else if (file == stderr && g_fakeStderrIsConsole) + ret = 1; + else if (file == stdout && g_fakeStdoutIsConsole) + ret = 1; + else + ret = IS_CONSOLE(file); + UTIL_TRACE_RET(ret); + return ret; +} + +void UTIL_fakeStdinIsConsole(void) +{ + g_fakeStdinIsConsole = 1; +} +void UTIL_fakeStdoutIsConsole(void) +{ + g_fakeStdoutIsConsole = 1; +} +void UTIL_fakeStderrIsConsole(void) +{ + g_fakeStderrIsConsole = 1; +} + U64 UTIL_getFileSize(const char* infilename) { stat_t statbuf; - if (!UTIL_stat(infilename, &statbuf)) return UTIL_FILESIZE_UNKNOWN; - return UTIL_getFileSizeStat(&statbuf); + UTIL_TRACE_CALL("UTIL_getFileSize(%s)", infilename); + if (!UTIL_stat(infilename, &statbuf)) { + UTIL_TRACE_RET(-1); + return UTIL_FILESIZE_UNKNOWN; + } + { + U64 const size = UTIL_getFileSizeStat(&statbuf); + UTIL_TRACE_RET((int)size); + return size; + } } U64 UTIL_getFileSizeStat(const stat_t* statbuf) @@ -369,110 +631,171 @@ U64 UTIL_getTotalFileSize(const char* const * fileNamesTable, unsigned nbFiles) { U64 total = 0; unsigned n; + UTIL_TRACE_CALL("UTIL_getTotalFileSize(%u)", nbFiles); for (n=0; n 0) { + totalRead += bytesRead; + + /* If buffer is nearly full, expand it */ + if (bufSize - totalRead < 1 KB) { + if (bufSize >= MAX_FILE_OF_FILE_NAMES_SIZE) { + /* Too large, abort */ + free(buf); + return NULL; + } + + { size_t newBufSize = bufSize * 2; + if (newBufSize > MAX_FILE_OF_FILE_NAMES_SIZE) + newBufSize = MAX_FILE_OF_FILE_NAMES_SIZE; + + { char* newBuf = (char*)realloc(buf, newBufSize); + if (newBuf == NULL) { + free(buf); + return NULL; + } + + buf = newBuf; + bufSize = newBufSize; + } } } } + + /* Add null terminator to the end */ + buf[totalRead] = '\0'; + *totalReadPtr = totalRead; + + return buf; } -/* Conditions : - * size of @inputFileName file must be < @dstCapacity - * @dst must be initialized - * @return : nb of lines - * or -1 if there's an error - */ -static int -readLinesFromFile(void* dst, size_t dstCapacity, - const char* inputFileName) +/* Process a buffer containing multiple lines and count the number of lines */ +static size_t UTIL_processLines(char* buffer, size_t bufferSize) { - int nbFiles = 0; + size_t lineCount = 0; + size_t i = 0; + + /* Convert newlines to null terminators and count lines */ + while (i < bufferSize) { + if (buffer[i] == '\n') { + buffer[i] = '\0'; /* Replace newlines with null terminators */ + lineCount++; + } + i++; + } + + /* Count the last line if it doesn't end with a newline */ + if (bufferSize > 0 && (i == 0 || buffer[i-1] != '\0')) { + lineCount++; + } + + return lineCount; +} + +/* Create an array of pointers to the lines in a buffer */ +static const char** UTIL_createLinePointers(char* buffer, size_t numLines, size_t bufferSize) +{ + size_t lineIndex = 0; size_t pos = 0; - char* const buf = (char*)dst; - FILE* const inputFile = fopen(inputFileName, "r"); + void* const bufferPtrs = malloc(numLines * sizeof(const char**)); + const char** const linePointers = (const char**)bufferPtrs; + if (bufferPtrs == NULL) return NULL; - assert(dst != NULL); + while (lineIndex < numLines && pos < bufferSize) { + size_t len = 0; + linePointers[lineIndex++] = buffer+pos; - if(!inputFile) { - if (g_utilDisplayLevel >= 1) perror("zstd:util:readLinesFromFile"); - return -1; - } + /* Find the next null terminator, being careful not to go past the buffer */ + while ((pos + len < bufferSize) && buffer[pos + len] != '\0') { + len++; + } - while ( !feof(inputFile) ) { - size_t const lineLength = readLineFromFile(buf+pos, dstCapacity-pos, 
inputFile); - if (lineLength == 0) break; - assert(pos + lineLength <= dstCapacity); /* '=' for inputFile not terminated with '\n' */ - pos += lineLength; - ++nbFiles; + /* Move past this string and its null terminator */ + pos += len; + if (pos < bufferSize) pos++; /* Skip the null terminator if we're not at buffer end */ } - CONTROL( fclose(inputFile) == 0 ); + /* Verify we processed the expected number of lines */ + if (lineIndex != numLines) { + /* Something went wrong - we didn't find as many lines as expected */ + free(bufferPtrs); + return NULL; + } - return nbFiles; + return linePointers; } -/*Note: buf is not freed in case function successfully created table because filesTable->fileNames[0] = buf*/ FileNamesTable* -UTIL_createFileNamesTable_fromFileName(const char* inputFileName) +UTIL_createFileNamesTable_fromFileList(const char* fileList) { - size_t nbFiles = 0; - char* buf; - size_t bufSize; - size_t pos = 0; stat_t statbuf; + char* buffer = NULL; + size_t numLines = 0; + size_t bufferSize = 0; - if (!UTIL_stat(inputFileName, &statbuf) || !UTIL_isRegularFileStat(&statbuf)) + /* Check if the input is a valid file */ + if (!UTIL_stat(fileList, &statbuf)) { return NULL; - - { U64 const inputFileSize = UTIL_getFileSizeStat(&statbuf); - if(inputFileSize > MAX_FILE_OF_FILE_NAMES_SIZE) - return NULL; - bufSize = (size_t)(inputFileSize + 1); /* (+1) to add '\0' at the end of last filename */ } - buf = (char*) malloc(bufSize); - CONTROL( buf != NULL ); + /* Check if the input is a supported type */ + if (!UTIL_isRegularFileStat(&statbuf) && + !UTIL_isFIFOStat(&statbuf) && + !UTIL_isFileDescriptorPipe(fileList)) { + return NULL; + } - { int const ret_nbFiles = readLinesFromFile(buf, bufSize, inputFileName); + /* Open the input file */ + { FILE* const inFile = fopen(fileList, "rb"); + if (inFile == NULL) return NULL; - if (ret_nbFiles <= 0) { - free(buf); - return NULL; - } - nbFiles = (size_t)ret_nbFiles; + /* Read the file content */ + buffer = UTIL_readFileContent(inFile, &bufferSize); + fclose(inFile); } - { const char** filenamesTable = (const char**) malloc(nbFiles * sizeof(*filenamesTable)); - CONTROL(filenamesTable != NULL); + if (buffer == NULL) return NULL; - { size_t fnb; - for (fnb = 0, pos = 0; fnb < nbFiles; fnb++) { - filenamesTable[fnb] = buf+pos; - pos += strlen(buf+pos)+1; /* +1 for the finishing `\0` */ - } } - assert(pos <= bufSize); + /* Process lines */ + numLines = UTIL_processLines(buffer, bufferSize); + if (numLines == 0) { + free(buffer); + return NULL; + } + + /* Create line pointers */ + { const char** linePointers = UTIL_createLinePointers(buffer, numLines, bufferSize); + if (linePointers == NULL) { + free(buffer); + return NULL; + } - return UTIL_assembleFileNamesTable(filenamesTable, nbFiles, buf); + /* Create the final table */ + return UTIL_assembleFileNamesTable(linePointers, numLines, buffer); } } + static FileNamesTable* UTIL_assembleFileNamesTable2(const char** filenames, size_t tableSize, size_t tableCapacity, char* buf) { @@ -528,7 +851,7 @@ void UTIL_refFilename(FileNamesTable* fnt, const char* filename) static size_t getTotalTableSize(FileNamesTable* table) { - size_t fnb = 0, totalSize = 0; + size_t fnb, totalSize = 0; for(fnb = 0 ; fnb < table->tableSize && table->fileNames[fnb] ; ++fnb) { totalSize += strlen(table->fileNames[fnb]) + 1; /* +1 to add '\0' at the end of each fileName */ } @@ -569,7 +892,7 @@ UTIL_mergeFileNamesTable(FileNamesTable* table1, FileNamesTable* table2) for( idx2=0 ; (idx2 < table2->tableSize) && table2->fileNames[idx2] 
&& (pos < newTotalTableSize) ; ++idx2, ++newTableIdx) { size_t const curLen = strlen(table2->fileNames[idx2]); memcpy(buf+pos, table2->fileNames[idx2], curLen); - assert(newTableIdx <= newTable->tableSize); + assert(newTableIdx < newTable->tableSize); newTable->fileNames[newTableIdx] = buf+pos; pos += curLen+1; } } @@ -605,6 +928,7 @@ static int UTIL_prepareFileList(const char* dirName, hFile=FindFirstFileA(path, &cFile); if (hFile == INVALID_HANDLE_VALUE) { UTIL_DISPLAYLEVEL(1, "Cannot open directory '%s'\n", dirName); + free(path); return 0; } free(path); @@ -693,8 +1017,11 @@ static int UTIL_prepareFileList(const char *dirName, ptrdiff_t newListSize = (*bufEnd - *bufStart) + LIST_SIZE_INCREASE; assert(newListSize >= 0); *bufStart = (char*)UTIL_realloc(*bufStart, (size_t)newListSize); - *bufEnd = *bufStart + newListSize; - if (*bufStart == NULL) { free(path); closedir(dir); return 0; } + if (*bufStart != NULL) { + *bufEnd = *bufStart + newListSize; + } else { + free(path); closedir(dir); return 0; + } } if (*bufStart + *pos + pathLength < *bufEnd) { memcpy(*bufStart + *pos, path, pathLength + 1); /* with final \0 */ @@ -891,9 +1218,6 @@ static char* mallocAndJoin2Dir(const char *dir1, const char *dir2) memcpy(outDirBuffer, dir1, dir1Size); outDirBuffer[dir1Size] = '\0'; - if (dir2[0] == '.') - return outDirBuffer; - buffer = outDirBuffer + dir1Size; if (dir1Size > 0 && *(buffer - 1) != PATH_SEP) { *buffer = PATH_SEP; @@ -1318,7 +1642,6 @@ int UTIL_countCores(int logical) #elif defined(__FreeBSD__) -#include #include /* Use physical core sysctl when available @@ -1406,7 +1729,3 @@ int UTIL_countLogicalCores(void) { return UTIL_countCores(1); } - -#if defined (__cplusplus) -} -#endif diff --git a/programs/util.h b/programs/util.h index faf8c9f11cb..65e12633a67 100644 --- a/programs/util.h +++ b/programs/util.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,25 +11,26 @@ #ifndef UTIL_H_MODULE #define UTIL_H_MODULE -#if defined (__cplusplus) -extern "C" { -#endif - - /*-**************************************** * Dependencies ******************************************/ #include "platform.h" /* PLATFORM_POSIX_VERSION, ZSTD_NANOSLEEP_SUPPORT, ZSTD_SETPRIORITY_SUPPORT */ #include /* size_t, ptrdiff_t */ +#include /* FILE */ #include /* stat, utime */ #include /* stat, chmod */ #include "../lib/common/mem.h" /* U64 */ - +#if !(defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__)) +#include +#endif /*-************************************************************ -* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW +* Fix fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW ***************************************************************/ -#if defined(_MSC_VER) && (_MSC_VER >= 1400) +#if defined(LIBC_NO_FSEEKO) +/* Some older libc implementations don't include these functions (e.g. 
Bionic < 24) */ +# define UTIL_fseek fseek +#elif defined(_MSC_VER) && (_MSC_VER >= 1400) # define UTIL_fseek _fseeki64 #elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ # define UTIL_fseek fseeko @@ -39,7 +40,6 @@ extern "C" { # define UTIL_fseek fseek #endif - /*-************************************************* * Sleep & priority functions: Windows - Posix - others ***************************************************/ @@ -88,6 +88,10 @@ extern "C" { #endif +#if defined (__cplusplus) +extern "C" { +#endif + /*-**************************************** * Console log ******************************************/ @@ -118,7 +122,6 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, const #define STRDUP(s) _strdup(s) #else #define PATH_SEP '/' -#include #define STRDUP(s) strdup(s) #endif @@ -126,15 +129,25 @@ int UTIL_requireUserConfirmation(const char* prompt, const char* abortMsg, const /** * Calls platform's equivalent of stat() on filename and writes info to statbuf. * Returns success (1) or failure (0). + * + * UTIL_fstat() is like UTIL_stat() but takes an optional fd that refers to the + * file in question. It turns out that this can be meaningfully faster. If fd is + * -1, behaves just like UTIL_stat() (i.e., falls back to using the filename). */ int UTIL_stat(const char* filename, stat_t* statbuf); +int UTIL_fstat(const int fd, const char* filename, stat_t* statbuf); /** * Instead of getting a file's stats, this updates them with the info in the * provided stat_t. Currently sets owner, group, atime, and mtime. Will only * update this info for regular files. + * + * UTIL_setFDStat() also takes an fd, and will preferentially use that to + * indicate which file to modify. If fd is -1, it will fall back to using the + * filename. */ int UTIL_setFileStat(const char* filename, const stat_t* statbuf); +int UTIL_setFDStat(const int fd, const char* filename, const stat_t* statbuf); /** * Set atime to now and mtime to the st_mtim in statbuf. @@ -159,8 +172,11 @@ U64 UTIL_getFileSizeStat(const stat_t* statbuf); * Like chmod(), but only modifies regular files. Provided statbuf may be NULL, * in which case this function will stat() the file internally, in order to * check whether it should be modified. + * + * If fd is -1, fd is ignored and the filename is used. */ int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions); +int UTIL_fchmod(const int fd, char const* filename, const stat_t* statbuf, mode_t permissions); /* * In the absence of a pre-existing stat result on the file in question, these @@ -168,12 +184,34 @@ int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions); * compute the needed information. */ +int UTIL_isFdRegularFile(int fd); int UTIL_isRegularFile(const char* infilename); int UTIL_isDirectory(const char* infilename); int UTIL_isSameFile(const char* file1, const char* file2); +int UTIL_isSameFileStat(const char* file1, const char* file2, const stat_t* file1Stat, const stat_t* file2Stat); int UTIL_isCompressedFile(const char* infilename, const char *extensionList[]); int UTIL_isLink(const char* infilename); int UTIL_isFIFO(const char* infilename); +int UTIL_isFileDescriptorPipe(const char* filename); + +/** + * Returns whether the given file descriptor is a console. + * Allows faking whether stdin/stdout/stderr is a console + * using UTIL_fake*IsConsole().
+ */ +int UTIL_isConsole(FILE* file); + +/** + * Pretends that stdin/stdout/stderr is a console for testing. + */ +void UTIL_fakeStdinIsConsole(void); +void UTIL_fakeStdoutIsConsole(void); +void UTIL_fakeStderrIsConsole(void); + +/** + * Emit traces for functions that read, or modify file metadata. + */ +void UTIL_traceFileStat(void); #define UTIL_FILESIZE_UNKNOWN ((U64)(-1)) U64 UTIL_getFileSize(const char* infilename); @@ -214,13 +252,13 @@ typedef struct size_t tableCapacity; } FileNamesTable; -/*! UTIL_createFileNamesTable_fromFileName() : +/*! UTIL_createFileNamesTable_fromFileList() : * read filenames from @inputFileName, and store them into returned object. * @return : a FileNamesTable*, or NULL in case of error (ex: @inputFileName doesn't exist). * Note: inputFileSize must be less than 50MB */ FileNamesTable* -UTIL_createFileNamesTable_fromFileName(const char* inputFileName); +UTIL_createFileNamesTable_fromFileList(const char* inputFileName); /*! UTIL_assembleFileNamesTable() : * This function takes ownership of its arguments, @filenames and @buf, @@ -248,7 +286,6 @@ UTIL_mergeFileNamesTable(FileNamesTable* table1, FileNamesTable* table2); /*! UTIL_expandFNT() : * read names from @fnt, and expand those corresponding to directories * update @fnt, now containing only file names, - * @return : 0 in case of success, 1 if error * note : in case of error, @fnt[0] is NULL */ void UTIL_expandFNT(FileNamesTable** fnt, int followLinks); @@ -306,7 +343,7 @@ void UTIL_refFilename(FileNamesTable* fnt, const char* filename); FileNamesTable* UTIL_createExpandedFNT(const char* const* filenames, size_t nbFilenames, int followLinks); -#if defined(_WIN32) || defined(WIN32) +#if defined(_WIN32) DWORD CountSetBits(ULONG_PTR bitMask); #endif diff --git a/programs/windres/verrsrc.h b/programs/windres/verrsrc.h index c1b60e90f32..61b1f3ddc7e 100644 --- a/programs/windres/verrsrc.h +++ b/programs/windres/verrsrc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/programs/windres/zstd.rc b/programs/windres/zstd.rc index f5e404730d2..a2118c2df10 100644 --- a/programs/windres/zstd.rc +++ b/programs/windres/zstd.rc @@ -32,11 +32,11 @@ BEGIN BEGIN BLOCK "040904B0" BEGIN - VALUE "CompanyName", "Yann Collet, Facebook, Inc." + VALUE "CompanyName", "Meta Platforms, Inc." VALUE "FileDescription", "Zstandard - Fast and efficient compression algorithm" VALUE "FileVersion", ZSTD_VERSION_STRING VALUE "InternalName", "zstd.exe" - VALUE "LegalCopyright", "Copyright (c) 2013-present, Yann Collet, Facebook, Inc." + VALUE "LegalCopyright", "Copyright (c) Meta Platforms, Inc. and affiliates." VALUE "OriginalFilename", "zstd.exe" VALUE "ProductName", "Zstandard" VALUE "ProductVersion", ZSTD_VERSION_STRING diff --git a/programs/zstd.1 b/programs/zstd.1 index a2bf7fd2f50..e49a182d506 100644 --- a/programs/zstd.1 +++ b/programs/zstd.1 @@ -1,5 +1,5 @@ . -.TH "ZSTD" "1" "August 2022" "zstd 1.5.3" "User Commands" +.TH "ZSTD" "1" "February 2025" "zstd 1.5.7" "User Commands" . .SH "NAME" \fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files @@ -17,10 +17,10 @@ \fBzstdcat\fR is equivalent to \fBzstd \-dcf\fR . .SH "DESCRIPTION" -\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip (1)\fR and \fBxz (1)\fR\. 
It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, from fast modes at > 200 MB/s per core, to strong modes with excellent compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core\. +\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip\fR(1) and \fBxz\fR(1)\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, from fast modes at > 200 MB/s per core, to strong modes with excellent compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core, which remains roughly stable at all compression settings\. . .P -\fBzstd\fR command line syntax is generally similar to gzip, but features the following differences : +\fBzstd\fR command line syntax is generally similar to gzip, but features the following few differences: . .IP "\(bu" 4 Source files are preserved by default\. It\'s possible to remove them automatically by using the \fB\-\-rm\fR command\. @@ -34,10 +34,13 @@ When compressing a single file, \fBzstd\fR displays progress notifications and r .IP "\(bu" 4 \fBzstd\fR does not accept input from console, though it does accept \fBstdin\fR when it\'s not the console\. . +.IP "\(bu" 4 +\fBzstd\fR does not store the input\'s filename or attributes, only its contents\. +. .IP "" 0 . .P -\fBzstd\fR processes each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal : it will display an error message and skip the \fIfile\fR\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. +\fBzstd\fR processes each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal: it will display an error message and skip the file\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. . .P Unless \fB\-\-stdout\fR or \fB\-o\fR is specified, \fIfiles\fR are written to a new file whose name is derived from the source \fIfile\fR name: @@ -50,12 +53,12 @@ When decompressing, the \fB\.zst\fR suffix is removed from the source filename t . .IP "" 0 . -.SS "Concatenation with \.zst files" +.SS "Concatenation with \.zst Files" It is possible to concatenate multiple \fB\.zst\fR files\. \fBzstd\fR will decompress such agglomerated file as if it was a single \fB\.zst\fR file\. . .SH "OPTIONS" . -.SS "Integer suffixes and special values" +.SS "Integer Suffixes and Special Values" In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers\. There must be no space between the integer and the suffix\. . .TP @@ -66,7 +69,7 @@ Multiply the integer by 1,024 (2^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accep \fBMiB\fR Multiply the integer by 1,048,576 (2^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\. . 
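The suffix rule described above (K/Ki/KB/KiB multiply by 2^10, M/Mi/MB/MiB by 2^20, with no space between the integer and the suffix) can be sketched in C as follows. This is an illustrative parser, not the CLI's actual argument-parsing code, and parseSizeSuffix is a hypothetical name:

    #include <stdlib.h>   /* strtoull */

    /* Illustrative parser for the documented suffix rule:
     * "500K" -> 500 << 10, "16MiB" -> 16 << 20.
     * Characters after the leading K/M ("B", "iB") are accepted loosely
     * here; a production parser should validate them strictly. */
    static unsigned long long parseSizeSuffix(const char* s)
    {
        char* end = NULL;
        unsigned long long v = strtoull(s, &end, 10);
        if (end == s) return 0;          /* no digits: invalid */
        if (*end == 'K') v <<= 10;       /* K, KB, Ki, KiB */
        else if (*end == 'M') v <<= 20;  /* M, MB, Mi, MiB */
        return v;
    }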
-.SS "Operation mode" +.SS "Operation Mode" If multiple operation mode options are given, the last one takes effect\. . .TP @@ -83,20 +86,20 @@ Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\ . .TP \fB\-b#\fR -Benchmark file(s) using compression level # +Benchmark file(s) using compression level \fI#\fR\. See \fIBENCHMARK\fR below for a description of this operation\. . .TP -\fB\-\-train FILEs\fR -Use FILEs as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. +\fB\-\-train FILES\fR +Use \fIFILES\fR as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. See \fIDICTIONARY BUILDER\fR below for a description of this operation\. . .TP \fB\-l\fR, \fB\-\-list\fR Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command\'s output can be augmented with the \fB\-v\fR modifier\. . -.SS "Operation modifiers" +.SS "Operation Modifiers" . .IP "\(bu" 4 -\fB\-#\fR: \fB#\fR compression level [1\-19] (default: 3) +\fB\-#\fR: selects \fB#\fR compression level [1\-19] (default: 3)\. Higher compression levels \fIgenerally\fR produce higher compression ratio at the expense of speed and memory\. A rough rule of thumb is that compression speed is expected to be divided by 2 every 2 levels\. Technically, each level is mapped to a set of advanced parameters (that can also be modified individually, see below)\. Because the compressor\'s behavior highly depends on the content to compress, there\'s no guarantee of a smooth progression from one level to another\. . .IP "\(bu" 4 \fB\-\-ultra\fR: unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\. @@ -105,16 +108,25 @@ Display information related to a zstd compressed file, such as size, ratio, and \fB\-\-fast[=#]\fR: switch to ultra\-fast compression levels\. If \fB=#\fR is not present, it defaults to \fB1\fR\. The higher the value, the faster the compression speed, at the cost of some compression ratio\. This setting overwrites compression level if one was set previously\. Similarly, if a compression level is set after \fB\-\-fast\fR, it overrides it\. . .IP "\(bu" 4 -\fB\-T#\fR, \fB\-\-threads=#\fR: Compress using \fB#\fR working threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to \fBZSTDMT_NBWORKERS_MAX\fR, which is either 64 in 32\-bit mode, or 256 for 64\-bit environments\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\. +\fB\-T#\fR, \fB\-\-threads=#\fR: Compress using \fB#\fR working threads (default: between 1 and 4 depending on physical CPU cores; see \fBZSTD_NBTHREADS\fR below)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to \fBZSTDMT_NBWORKERS_MAX\fR, which is either 64 in 32\-bit mode, or 256 for 64\-bit environments\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\. . .IP "\(bu" 4 -\fB\-\-single\-thread\fR: Use a single thread for both I/O and compression\. As compression is serialized with I/O, this can be slightly slower\. Single\-thread mode features significantly lower memory usage, which can be useful for systems with limited amount of memory, such as 32\-bit systems\. 
Note 1 : this mode is the only available one when multithread support is disabled\. Note 2 : this mode is different from \fB\-T1\fR, which spawns 1 compression thread in parallel with I/O\. Final compressed result is also slightly different from \fB\-T1\fR\. +\fB\-\-single\-thread\fR: Use a single thread for both I/O and compression\. As compression is serialized with I/O, this can be slightly slower\. Single\-thread mode features significantly lower memory usage, which can be useful for systems with limited amount of memory, such as 32\-bit systems\. +. +.IP +Note 1: this mode is the only available one when multithread support is disabled\. +. +.IP +Note 2: this mode is different from \fB\-T1\fR, which spawns 1 compression thread in parallel with I/O\. Final compressed result is also slightly different from \fB\-T1\fR\. . .IP "\(bu" 4 \fB\-\-auto\-threads={physical,logical} (default: physical)\fR: When using a default amount of threads via \fB\-T0\fR, choose the default based on the number of detected physical or logical cores\. . .IP "\(bu" 4 -\fB\-\-adapt[=min=#,max=#]\fR : \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. \fInote\fR : at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. +\fB\-\-adapt[=min=#,max=#]\fR: \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MiB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. +. +.IP +\fINote\fR: at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. . .IP "\(bu" 4 \fB\-\-long[=#]\fR: enables long distance matching with \fB#\fR \fBwindowLog\fR, if \fB#\fR is not present it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\. @@ -123,58 +135,73 @@ Display information related to a zstd compressed file, such as size, ratio, and Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. . .IP "\(bu" 4 +\fB\-\-max\fR: set advanced parameters to maximum compression\. warning: this setting is very slow and uses a lot of resources\. It\'s inappropriate for 32\-bit mode and therefore disabled in this mode\. +. +.IP "\(bu" 4 \fB\-D DICT\fR: use \fBDICT\fR as Dictionary to compress or decompress FILE(s) . .IP "\(bu" 4 -\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. 
This is effectively dictionary compression with some convenient parameter selection, namely that windowSize > srcSize\. +\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that \fIwindowSize\fR > \fIsrcSize\fR\. +. +.IP +Note: cannot use both this and \fB\-D\fR together\. +. +.IP +Note: \fB\-\-long\fR mode will be automatically activated if \fIchainLog\fR < \fIfileLog\fR (\fIfileLog\fR being the \fIwindowLog\fR required to cover the whole file)\. You can also manually force it\. +. +.IP +Note: up to level 15, you can use \fB\-\-patch\-from\fR in \fB\-\-single\-thread\fR mode to improve compression ratio marginally at the cost of speed\. Using \'\-\-single\-thread\' above level 15 will lead to lower compression ratios\. . .IP -Note: cannot use both this and \-D together Note: \fB\-\-long\fR mode will be automatically activated if chainLog < fileLog (fileLog being the windowLog required to cover the whole file)\. You can also manually force it\. Note: for all levels, you can use \-\-patch\-from in \-\-single\-thread mode to improve compression ratio at the cost of speed Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e\. 4096), and by setting a large \fB\-\-zstd=chainLog=\fR +Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e\. 4096), and by setting a large \fB\-\-zstd=chainLog=\fR\. . .IP "\(bu" 4 -\fB\-\-rsyncable\fR : \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\. +\fB\-\-rsyncable\fR: \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and a potential impact to compression speed, perceptible at higher speeds, for example when combining \fB\-\-rsyncable\fR with many parallel worker threads\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\. . .IP "\(bu" 4 \fB\-C\fR, \fB\-\-[no\-]check\fR: add integrity check computed from uncompressed data (default: enabled) . .IP "\(bu" 4 -\fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \-\-content\-size (meaning that the original size will be placed in the header)\. +\fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \fB\-\-content\-size\fR (meaning that the original size will be placed in the header)\. . .IP "\(bu" 4 \fB\-\-no\-dictID\fR: do not store dictionary ID within frame header (dictionary compression)\. 
The decoder will have to rely on implicit knowledge about which dictionary to use, it won\'t be able to check if it\'s correct\. . .IP "\(bu" 4 -\fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. By default, Zstandard uses 128 MB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (i\.e\. you can increase or decrease it)\. +\fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. By default, \fBzstd\fR uses 128 MiB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (i\.e\. you can increase or decrease it)\. . .IP -This is also used during compression when using with \-\-patch\-from=\. In this case, this parameter overrides that maximum size allowed for a dictionary\. (128 MB)\. +This is also used during compression when using with \fB\-\-patch\-from=\fR\. In this case, this parameter overrides that maximum size allowed for a dictionary\. (128 MiB)\. . .IP -Additionally, this can be used to limit memory for dictionary training\. This parameter overrides the default limit of 2 GB\. zstd will load training samples up to the memory limit and ignore the rest\. +Additionally, this can be used to limit memory for dictionary training\. This parameter overrides the default limit of 2 GiB\. zstd will load training samples up to the memory limit and ignore the rest\. . .IP "\(bu" 4 -\fB\-\-stream\-size=#\fR : Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\. +\fB\-\-stream\-size=#\fR: Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\. . .IP "\(bu" 4 \fB\-\-size\-hint=#\fR: When handling input from a stream, \fBzstd\fR must guess how large the source size will be when optimizing compression parameters\. If the stream size is relatively small, this guess may be a poor one, resulting in a higher compression ratio than expected\. This feature allows for controlling the guess when needed\. Exact guesses result in better compression ratios\. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation\. . .IP "\(bu" 4 -\fB\-o FILE\fR: save result into \fBFILE\fR +\fB\-\-target\-compressed\-block\-size=#\fR: Attempt to produce compressed blocks of approximately this size\. This will split larger blocks in order to approach this target\. This feature is notably useful for improved latency, when the receiver can leverage receiving early incomplete data\. This parameter defines a loose target: compressed blocks will target this size "on average", but individual blocks can still be larger or smaller\. Enabling this feature can decrease compression speed by up to ~10% at level 1\. Higher levels will see smaller relative speed regression, becoming invisible at higher settings\. . .IP "\(bu" 4 \fB\-f\fR, \fB\-\-force\fR: disable input and output checks\. 
Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc\. During decompression and when the output destination is stdout, pass\-through unrecognized formats as\-is\. . .IP "\(bu" 4 -\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console); keep original files unchanged\. +\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console); keep original files (disable \fB\-\-rm\fR)\. +. +.IP "\(bu" 4 +\fB\-o FILE\fR: save result into \fBFILE\fR\. Note that this operation is in conflict with \fB\-c\fR\. If both operations are present on the command line, the last expressed one wins\. . .IP "\(bu" 4 \fB\-\-[no\-]sparse\fR: enable / disable sparse FS support, to make files with many zeroes smaller on disk\. Creating sparse files may save disk space and speed up decompression by reducing the amount of disk I/O\. default: enabled when output is into a file, and disabled when output is stdout\. This setting overrides default and can force sparse mode over stdout\. . .IP "\(bu" 4 -\fB\-\-[no\-]pass\-through\fR enable / disable passing through uncompressed files as\-is\. During decompression when pass\-through is enabled, unrecognized formats will be copied as\-is from the input to the output\. By default, pass\-through will occur when the output destination is stdout and the force (\-f) option is set\. +\fB\-\-[no\-]pass\-through\fR enable / disable passing through uncompressed files as\-is\. During decompression when pass\-through is enabled, unrecognized formats will be copied as\-is from the input to the output\. By default, pass\-through will occur when the output destination is stdout and the force (\fB\-f\fR) option is set\. . .IP "\(bu" 4 -\fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. If used in combination with \-o, will trigger a confirmation prompt (which can be silenced with \-f), as this is a destructive operation\. +\fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. This command is silently ignored if output is \fBstdout\fR\. If used in combination with \fB\-o\fR, triggers a confirmation prompt (which can be silenced with \fB\-f\fR), as this is a destructive operation\. . .IP "\(bu" 4 \fB\-k\fR, \fB\-\-keep\fR: keep source file(s) after successful compression or decompression\. This is the default behavior\. @@ -201,7 +228,7 @@ If input directory contains "\.\.", the files in this directory will be ignored\ \fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR: display help/long help and exit . .IP "\(bu" 4 -\fB\-V\fR, \fB\-\-version\fR: display version number and exit\. Advanced : \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-q\fR will only display the version number, suitable for machine reading\. +\fB\-V\fR, \fB\-\-version\fR: display version number and immediately exit\. note that, since it exits, flags specified after \fB\-V\fR are effectively ignored\. Advanced: \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-qV\fR will only display the version number, suitable for machine reading\. . .IP "\(bu" 4 \fB\-v\fR, \fB\-\-verbose\fR: verbose mode, display more information @@ -213,14 +240,17 @@ If input directory contains "\.\.", the files in this directory will be ignored\ \fB\-\-no\-progress\fR: do not display the progress bar, but keep all other messages\. . 
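The decompression memory cap described under -M#/--memory a little earlier has a close counterpart in libzstd's advanced API, which bounds the decoder's window size. A hedged sketch, assuming the stable ZSTD_d_windowLogMax parameter (createLimitedDCtx is a hypothetical helper name):

    #include <zstd.h>

    /* Sketch: cap the decompression window at 2^27 bytes (128 MiB),
     * comparable in spirit to the CLI's default --memory limit.
     * Frames requiring a larger window then fail with an error
     * instead of allocating more memory. */
    static ZSTD_DCtx* createLimitedDCtx(void)
    {
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        if (dctx == NULL) return NULL;
        {   size_t const r = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
            if (ZSTD_isError(r)) { ZSTD_freeDCtx(dctx); return NULL; }
        }
        return dctx;
    }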
.IP "\(bu" 4 -\fB\-\-show\-default\-cparams\fR: Shows the default compression parameters that will be used for a particular src file\. If the provided src file is not a regular file (e\.g\. named pipe), the cli will just output the default parameters\. That is, the parameters that are used when the src size is unknown\. +\fB\-\-show\-default\-cparams\fR: shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size\. If the provided file is not a regular file (e\.g\. a pipe), this flag will output the parameters used for inputs of unknown size\. +. +.IP "\(bu" 4 +\fB\-\-exclude\-compressed\fR: only compress files that are not already compressed\. . .IP "\(bu" 4 \fB\-\-\fR: All arguments after \fB\-\-\fR are treated as files . .IP "" 0 . -.SS "gzip Operation modifiers" +.SS "gzip Operation Modifiers" When invoked via a \fBgzip\fR symlink, \fBzstd\fR will support further options that intend to mimic the \fBgzip\fR behavior: . .TP @@ -231,288 +261,317 @@ do not store the original filename and timestamps when compressing a file\. This \fB\-\-best\fR alias to the option \fB\-9\fR\. . -.SS "Interactions with Environment Variables" -Employing environment variables to set parameters has security implications\. Therefore, this avenue is intentionally limited\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the compression level and number of threads to use during compression, respectively\. +.SS "Environment Variables" +Employing environment variables to set parameters has security implications\. Therefore, this avenue is intentionally limited\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the default compression level and number of threads to use during compression, respectively\. . .P \fBZSTD_CLEVEL\fR can be used to set the level between 1 and 19 (the "normal" range)\. If the value of \fBZSTD_CLEVEL\fR is not a valid integer, it will be ignored with a warning message\. \fBZSTD_CLEVEL\fR just replaces the default compression level (\fB3\fR)\. . .P -\fBZSTD_NBTHREADS\fR can be used to set the number of threads \fBzstd\fR will attempt to use during compression\. If the value of \fBZSTD_NBTHREADS\fR is not a valid unsigned integer, it will be ignored with a warning message\. \fBZSTD_NBTHREADS\fR has a default value of (\fB1\fR), and is capped at ZSTDMT_NBWORKERS_MAX==200\. \fBzstd\fR must be compiled with multithread support for this to have any effect\. +\fBZSTD_NBTHREADS\fR can be used to set the number of threads \fBzstd\fR will attempt to use during compression\. If the value of \fBZSTD_NBTHREADS\fR is not a valid unsigned integer, it will be ignored with a warning message\. \fBZSTD_NBTHREADS\fR has a default value of \fBmax(1, min(4, nbCores/4))\fR, and is capped at ZSTDMT_NBWORKERS_MAX==200\. \fBzstd\fR must be compiled with multithread support for this variable to have any effect\. . .P They can both be overridden by corresponding command line arguments: \fB\-#\fR for compression level and \fB\-T#\fR for number of compression threads\. . -.SH "DICTIONARY BUILDER" -\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then, during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. 
Compression of small files similar to the sample set will be greatly improved\. +.SH "ADVANCED COMPRESSION OPTIONS" +\fBzstd\fR provides 22 predefined regular compression levels plus the fast levels\. A compression level is translated internally into multiple advanced parameters that control the behavior of the compressor (one can observe the result of this translation with \fB\-\-show\-default\-cparams\fR)\. These advanced parameters can be overridden using advanced compression options\. +. +.SS "\-\-zstd[=options]:" +The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: . .TP -\fB\-\-train FILEs\fR -Use FILEs as training set to create a dictionary\. The training set should ideally contain a lot of samples (> 100), and weight typically 100x the target dictionary size (for example, ~10 MB for a 100 KB dictionary)\. \fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\. +\fBstrategy\fR=\fIstrat\fR, \fBstrat\fR=\fIstrat\fR +Specify a strategy used by a match finder\. . .IP -Since dictionary compression is mostly effective for small files, the expectation is that the training set will only contain small files\. In the case where some samples happen to be large, only the first 128 KB of these samples will be used for training\. +There are 9 strategies numbered from 1 to 9, from fastest to strongest: 1=\fBZSTD_fast\fR, 2=\fBZSTD_dfast\fR, 3=\fBZSTD_greedy\fR, 4=\fBZSTD_lazy\fR, 5=\fBZSTD_lazy2\fR, 6=\fBZSTD_btlazy2\fR, 7=\fBZSTD_btopt\fR, 8=\fBZSTD_btultra\fR, 9=\fBZSTD_btultra2\fR\. +. +.TP +\fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR +Specify the maximum number of bits for a match distance\. . .IP -\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional advanced parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default \fB\-\-train\fR is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. +A higher number of bits increases the chance to find a match, which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32\-bit platforms and 31 (2 GiB) on 64\-bit platforms\. . -.TP -\fB\-o FILE\fR -Dictionary saved into \fBFILE\fR (default name: dictionary)\. +.IP +Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. . .TP -\fB\-\-maxdict=#\fR -Limit dictionary to specified size (default: 112640 bytes)\. As usual, quantities are expressed in bytes by default, and it\'s possible to employ suffixes (like \fBKB\fR or \fBMB\fR) to specify larger values\. +\fBhashLog\fR=\fIhlog\fR, \fBhlog\fR=\fIhlog\fR +Specify the maximum number of bits for a hash table\. . -.TP -\fB\-#\fR -Use \fB#\fR compression level during training (optional)\. Will generate statistics more tuned for selected compression level, resulting in a \fIsmall\fR compression ratio improvement for this level\.
+.IP +Bigger hash tables cause fewer collisions which usually makes compression faster, but requires more memory during compression\. . -.TP -\fB\-B#\fR -Split input files into blocks of size # (default: no split) +.IP +The minimum \fIhlog\fR is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB)\. . .TP -\fB\-M#\fR, \fB\-\-memory=#\fR -Limit the amount of sample data loaded for training (default: 2 GB)\. Note that the default (2 GB) is also the maximum\. This parameter can be useful in situations where the training set size is not well controlled and could be potentially very large\. Since speed of the training process is directly correlated to the size of the training sample set, a smaller sample set leads to faster training\. +\fBchainLog\fR=\fIclog\fR, \fBclog\fR=\fIclog\fR +Specify the maximum number of bits for the secondary search structure, whose form depends on the selected \fBstrategy\fR\. . .IP -In situations where the training set is larger than maximum memory, the CLI will randomly select samples among the available ones, up to the maximum allowed memory budget\. This is meant to improve dictionary relevance by mitigating the potential impact of clustering, such as selecting only files from the beginning of a list sorted by modification date, or sorted by alphabetical order\. The randomization process is deterministic, so training of the same list of files with the same parameters will lead to the creation of the same dictionary\. +Higher numbers of bits increase the chance to find a match, which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the \fBZSTD_fast\fR \fBstrategy\fR, which only has the primary hash table\. . -.TP -\fB\-\-dictID=#\fR -A dictionary ID is a locally unique ID\. The decoder will use this value to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to provide an explicit number ID instead\. It\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. Note that short numbers have an advantage : an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. +.IP +The minimum \fIclog\fR is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32\-bit platforms and 30 (1B entries / 4 GiB) on 64\-bit platforms\. . .TP -\fB\-\-train\-cover[=k#,d=#,steps=#,split=#,shrink[=#]]\fR -Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. If \fIsplit\fR is not specified or split <= 0, then the default value of 100 is used\. Requires that \fId\fR <= \fIk\fR\. If \fIshrink\fR flag is not used, then the default value for \fIshrinkDict\fR of 0 is used\. If \fIshrink\fR is not specified, then the default value for \fIshrinkDictMaxRegression\fR of 1 is used\. +\fBsearchLog\fR=\fIslog\fR, \fBslog\fR=\fIslog\fR +Specify the maximum number of searches in a hash chain or a binary tree using logarithmic scale\. . .IP -Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\.
Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. If \fIsplit\fR is 100, all input samples are used for both training and testing to find optimal \fId\fR and \fIk\fR to build dictionary\. Supports multithreading if \fBzstd\fR is compiled with threading support\. Having \fIshrink\fR enabled takes a truncated dictionary of minimum size and doubles in size until compression ratio of the truncated dictionary is at most \fIshrinkDictMaxRegression%\fR worse than the compression ratio of the largest dictionary\. +More searches increase the chance to find a match, which usually increases compression ratio but decreases compression speed\. . .IP -Examples: +The minimum \fIslog\fR is 1 and the maximum is \'windowLog\' \- 1\. . -.IP -\fBzstd \-\-train\-cover FILEs\fR +.TP +\fBminMatch\fR=\fImml\fR, \fBmml\fR=\fImml\fR +Specify the minimum searched length of a match in a hash table\. . .IP -\fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR +Larger search lengths usually decrease compression ratio but improve decompression speed\. . .IP -\fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR +The minimum \fImml\fR is 3 and the maximum is 7\. +. +.TP +\fBtargetLength\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR +The impact of this field varies depending on the selected strategy\. . .IP -\fBzstd \-\-train\-cover=k=50 FILEs\fR +For \fBZSTD_btopt\fR, \fBZSTD_btultra\fR and \fBZSTD_btultra2\fR, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\. . .IP -\fBzstd \-\-train\-cover=k=50,split=60 FILEs\fR +For \fBZSTD_fast\fR, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed: a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\. . .IP -\fBzstd \-\-train\-cover=shrink FILEs\fR +For all other strategies, this field has no impact\. . .IP -\fBzstd \-\-train\-cover=shrink=2 FILEs\fR +The minimum \fItlen\fR is 0 and the maximum is 128 KiB\. . .TP -\fB\-\-train\-fastcover[=k#,d=#,f=#,steps=#,split=#,accel=#]\fR -Same as cover but with extra parameters \fIf\fR and \fIaccel\fR and different default value of split If \fIsplit\fR is not specified, then it tries \fIsplit\fR = 75\. If \fIf\fR is not specified, then it tries \fIf\fR = 20\. Requires that 0 < \fIf\fR < 32\. If \fIaccel\fR is not specified, then it tries \fIaccel\fR = 1\. Requires that 0 < \fIaccel\fR <= 10\. Requires that \fId\fR = 6 or \fId\fR = 8\. +\fBoverlapLog\fR=\fIovlog\fR, \fBovlog\fR=\fIovlog\fR +Determine \fBoverlapSize\fR, amount of data reloaded from previous job\. This parameter is only available when multithreading is enabled\. Reloading more data improves compression ratio, but decreases speed\. . .IP -\fIf\fR is log of size of array that keeps track of frequency of subsegments of size \fId\fR\. The subsegment is hashed to an index in the range [0,2^\fIf\fR \- 1]\. It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency\. Using a higher \fIf\fR reduces collision but takes longer\. +The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\.
9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default": \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. +. +.TP +\fBldmHashRateLog\fR=\fIlhrlog\fR, \fBlhrlog\fR=\fIlhrlog\fR +Specify the frequency of inserting entries into the long distance matching hash table\. . .IP -Examples: +This option is ignored unless long distance matching is enabled\. . .IP -\fBzstd \-\-train\-fastcover FILEs\fR +Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\. . .IP -\fBzstd \-\-train\-fastcover=d=8,f=15,accel=2 FILEs\fR +The default value varies between 4 and 7, depending on \fBstrategy\fR\. . .TP -\fB\-\-train\-legacy[=selectivity=#]\fR -Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its achievable maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. +\fBldmHashLog\fR=\fIlhlog\fR, \fBlhlog\fR=\fIlhlog\fR +Specify the maximum size for a hash table used for long distance matching\. . .IP -Examples: +This option is ignored unless long distance matching is enabled\. . .IP -\fBzstd \-\-train\-legacy FILEs\fR +Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed\. . .IP -\fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR -. -.SH "BENCHMARK" +The minimum \fIlhlog\fR is 6 and the maximum is 30 (default: \fBwindowLog \- ldmHashRateLog\fR)\. . .TP -\fB\-b#\fR -benchmark file(s) using compression level # +\fBldmMinMatch\fR=\fIlmml\fR, \fBlmml\fR=\fIlmml\fR +Specify the minimum searched length of a match for long distance matching\. . -.TP -\fB\-e#\fR -benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive) +.IP +This option is ignored unless long distance matching is enabled\. . -.TP -\fB\-i#\fR -minimum evaluation time, in seconds (default: 3s), benchmark mode only +.IP +Larger/very small values usually decrease compression ratio\. . -.TP -\fB\-B#\fR, \fB\-\-block\-size=#\fR -cut file(s) into independent chunks of size # (default: no chunking) +.IP +The minimum \fIlmml\fR is 4 and the maximum is 4096 (default: 32 to 64, depending on \fBstrategy\fR)\. . .TP -\fB\-\-priority=rt\fR -set process priority to real\-time +\fBldmBucketSizeLog\fR=\fIlblog\fR, \fBlblog\fR=\fIlblog\fR +Specify the size of each bucket for the hash table used for long distance matching\. . -.P -\fBOutput Format:\fR CompressionLevel#Filename : InputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed +.IP +This option is ignored unless long distance matching is enabled\. . -.P -\fBMethodology:\fR For both compression and decompression speed, the entire input is compressed/decompressed in\-memory to measure speed\. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\. +.IP +Larger bucket sizes improve collision resolution but decrease compression speed\. . 
-.SH "ADVANCED COMPRESSION OPTIONS" +.IP +The minimum \fIlblog\fR is 1 and the maximum is 8 (default: 4 to 8, depending on \fBstrategy\fR)\. +. +.SS "Example" +The following parameters sets advanced compression options to something similar to predefined level 19 for files bigger than 256 KB: +. +.P +\fB\-\-zstd\fR=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 . .SS "\-B#:" Specify the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is largest\. Different job sizes will lead to non\-identical compressed frames\. . -.SS "\-\-zstd[=options]:" -\fBzstd\fR provides 22 predefined compression levels\. The selected or default predefined compression level can be changed with advanced compression options\. The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: +.SH "DICTIONARY BUILDER" +\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then, during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\. . .TP -\fBstrategy\fR=\fIstrat\fR, \fBstrat\fR=\fIstrat\fR -Specify a strategy used by a match finder\. +\fB\-\-train FILEs\fR +Use FILEs as training set to create a dictionary\. The training set should ideally contain a lot of samples (> 100), and weight typically 100x the target dictionary size (for example, ~10 MB for a 100 KB dictionary)\. \fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\. . .IP -There are 9 strategies numbered from 1 to 9, from faster to stronger: 1=ZSTD_fast, 2=ZSTD_dfast, 3=ZSTD_greedy, 4=ZSTD_lazy, 5=ZSTD_lazy2, 6=ZSTD_btlazy2, 7=ZSTD_btopt, 8=ZSTD_btultra, 9=ZSTD_btultra2\. -. -.TP -\fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR -Specify the maximum number of bits for a match distance\. +Since dictionary compression is mostly effective for small files, the expectation is that the training set will only contain small files\. In the case where some samples happen to be large, only the first 128 KiB of these samples will be used for training\. . .IP -The higher number of increases the chance to find a match which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32\-bit platforms and 31 (2 GiB) on 64\-bit platforms\. +\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional advanced parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. 
The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default \fB\-\-train\fR is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. . -.IP -Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. +.TP +\fB\-o FILE\fR +Dictionary saved into \fBFILE\fR (default name: dictionary)\. . .TP -\fBhashLog\fR=\fIhlog\fR, \fBhlog\fR=\fIhlog\fR -Specify the maximum number of bits for a hash table\. +\fB\-\-maxdict=#\fR +Limit dictionary to specified size (default: 112640 bytes)\. As usual, quantities are expressed in bytes by default, and it\'s possible to employ suffixes (like \fBKB\fR or \fBMB\fR) to specify larger values\. . -.IP -Bigger hash tables cause fewer collisions which usually makes compression faster, but requires more memory during compression\. +.TP +\fB\-#\fR +Use \fB#\fR compression level during training (optional)\. Will generate statistics more tuned for selected compression level, resulting in a \fIsmall\fR compression ratio improvement for this level\. . -.IP -The minimum \fIhlog\fR is 6 (64 B) and the maximum is 30 (1 GiB)\. +.TP +\fB\-B#\fR +Split input files into blocks of size # (default: no split) . .TP -\fBchainLog\fR=\fIclog\fR, \fBclog\fR=\fIclog\fR -Specify the maximum number of bits for a hash chain or a binary tree\. +\fB\-M#\fR, \fB\-\-memory=#\fR +Limit the amount of sample data loaded for training (default: 2 GB)\. Note that the default (2 GB) is also the maximum\. This parameter can be useful in situations where the training set size is not well controlled and could be potentially very large\. Since speed of the training process is directly correlated to the size of the training sample set, a smaller sample set leads to faster training\. . .IP -Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the ZSTD_fast strategy\. +In situations where the training set is larger than maximum memory, the CLI will randomly select samples among the available ones, up to the maximum allowed memory budget\. This is meant to improve dictionary relevance by mitigating the potential impact of clustering, such as selecting only files from the beginning of a list sorted by modification date, or sorted by alphabetical order\. The randomization process is deterministic, so training of the same list of files with the same parameters will lead to the creation of the same dictionary\. +. +.TP +\fB\-\-dictID=#\fR +A dictionary ID is a locally unique ID\. The decoder will use this value to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to provide an explicit number ID instead\. It\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. Note that short numbers have an advantage: an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. . .IP -The minimum \fIclog\fR is 6 (64 B) and the maximum is 29 (524 Mib) on 32\-bit platforms and 30 (1 Gib) on 64\-bit platforms\. +Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2^31, so they should not be used in public\. . 
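The training flow and dictionary-ID rules documented above are exposed programmatically through zdict.h. A hedged sketch follows; it assumes the caller has concatenated all samples into one buffer with a parallel array of sample sizes, which is the layout zdict.h expects, and trainAndGetID is a hypothetical name:

    #include <zdict.h>

    /* Sketch: train a dictionary from back-to-back samples, then read the
     * dictionary ID a decoder would use to match frames against it. */
    static unsigned trainAndGetID(void* dictBuf, size_t dictCap,
                                  const void* samplesBuffer,
                                  const size_t* samplesSizes, unsigned nbSamples)
    {
        size_t const dictSize = ZDICT_trainFromBuffer(dictBuf, dictCap,
                                                      samplesBuffer, samplesSizes, nbSamples);
        if (ZDICT_isError(dictSize)) return 0;  /* 0 here: no valid dictionary */
        return ZDICT_getDictID(dictBuf, dictSize);
    }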
.TP -\fBsearchLog\fR=\fIslog\fR, \fBslog\fR=\fIslog\fR -Specify the maximum number of searches in a hash chain or a binary tree using logarithmic scale\. +\fB\-\-train\-cover[=k#,d=#,steps=#,split=#,shrink[=#]]\fR +Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. If \fIsplit\fR is not specified or split <= 0, then the default value of 100 is used\. Requires that \fId\fR <= \fIk\fR\. If \fIshrink\fR flag is not used, then the default value for \fIshrinkDict\fR of 0 is used\. If \fIshrink\fR is not specified, then the default value for \fIshrinkDictMaxRegression\fR of 1 is used\. . .IP -More searches increases the chance to find a match which usually increases compression ratio but decreases compression speed\. +Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\. Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. If \fIsplit\fR is 100, all input samples are used for both training and testing to find optimal \fId\fR and \fIk\fR to build dictionary\. Supports multithreading if \fBzstd\fR is compiled with threading support\. Having \fIshrink\fR enabled takes a truncated dictionary of minimum size and doubles in size until compression ratio of the truncated dictionary is at most \fIshrinkDictMaxRegression%\fR worse than the compression ratio of the largest dictionary\. . .IP -The minimum \fIslog\fR is 1 and the maximum is \'windowLog\' \- 1\. +Examples: . -.TP -\fBminMatch\fR=\fImml\fR, \fBmml\fR=\fImml\fR -Specify the minimum searched length of a match in a hash table\. +.IP +\fBzstd \-\-train\-cover FILEs\fR . .IP -Larger search lengths usually decrease compression ratio but improve decompression speed\. +\fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR . .IP -The minimum \fImml\fR is 3 and the maximum is 7\. +\fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR . -.TP -\fBtargetLength\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR -The impact of this field vary depending on selected strategy\. +.IP +\fBzstd \-\-train\-cover=k=50 FILEs\fR . .IP -For ZSTD_btopt, ZSTD_btultra and ZSTD_btultra2, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\. t For ZSTD_fast, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed : a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\. +\fBzstd \-\-train\-cover=k=50,split=60 FILEs\fR . .IP -For all other strategies, this field has no impact\. +\fBzstd \-\-train\-cover=shrink FILEs\fR . .IP -The minimum \fItlen\fR is 0 and the maximum is 128 Kib\. +\fBzstd \-\-train\-cover=shrink=2 FILEs\fR . .TP -\fBoverlapLog\fR=\fIovlog\fR, \fBovlog\fR=\fIovlog\fR -Determine \fBoverlapSize\fR, amount of data reloaded from previous job\. This parameter is only available when multithreading is enabled\. Reloading more data improves compression ratio, but decreases speed\. 
+\fB\-\-train\-fastcover[=k#,d=#,f=#,steps=#,split=#,accel=#]\fR +Same as cover but with extra parameters \fIf\fR and \fIaccel\fR and different default value of split If \fIsplit\fR is not specified, then it tries \fIsplit\fR = 75\. If \fIf\fR is not specified, then it tries \fIf\fR = 20\. Requires that 0 < \fIf\fR < 32\. If \fIaccel\fR is not specified, then it tries \fIaccel\fR = 1\. Requires that 0 < \fIaccel\fR <= 10\. Requires that \fId\fR = 6 or \fId\fR = 8\. . .IP -The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default" : \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. -. -.TP -\fBldmHashLog\fR=\fIlhlog\fR, \fBlhlog\fR=\fIlhlog\fR -Specify the maximum size for a hash table used for long distance matching\. +\fIf\fR is log of size of array that keeps track of frequency of subsegments of size \fId\fR\. The subsegment is hashed to an index in the range [0,2^\fIf\fR \- 1]\. It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency\. Using a higher \fIf\fR reduces collision but takes longer\. . .IP -This option is ignored unless long distance matching is enabled\. +Examples: . .IP -Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed\. +\fBzstd \-\-train\-fastcover FILEs\fR . .IP -The minimum \fIlhlog\fR is 6 and the maximum is 30 (default: 20)\. +\fBzstd \-\-train\-fastcover=d=8,f=15,accel=2 FILEs\fR . .TP -\fBldmMinMatch\fR=\fIlmml\fR, \fBlmml\fR=\fIlmml\fR -Specify the minimum searched length of a match for long distance matching\. +\fB\-\-train\-legacy[=selectivity=#]\fR +Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its achievable maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. . .IP -This option is ignored unless long distance matching is enabled\. +Examples: . .IP -Larger/very small values usually decrease compression ratio\. +\fBzstd \-\-train\-legacy FILEs\fR . .IP -The minimum \fIlmml\fR is 4 and the maximum is 4096 (default: 64)\. +\fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR . -.TP -\fBldmBucketSizeLog\fR=\fIlblog\fR, \fBlblog\fR=\fIlblog\fR -Specify the size of each bucket for the hash table used for long distance matching\. +.SH "BENCHMARK" +The \fBzstd\fR CLI provides a benchmarking mode that can be used to easily find suitable compression parameters, or alternatively to benchmark a computer\'s performance\. \fBzstd \-b [FILE(s)]\fR will benchmark \fBzstd\fR for both compression and decompression using default compression level\. Note that results are very dependent on the content being compressed\. . -.IP -This option is ignored unless long distance matching is enabled\. +.P +It\'s possible to pass multiple files to the benchmark, and even a directory with \fB\-r DIRECTORY\fR\. When no \fBFILE\fR is provided, the benchmark will use a procedurally generated \fBlorem ipsum\fR text\. . 
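The f parameter explained in the --train-fastcover entry above (a 2^f-entry array counting hashed d-byte subsegments, where colliding subsegments are merged) can be illustrated with a toy counter. This is a conceptual sketch only, not the actual fastcover implementation; the FNV-1a hash is an arbitrary stand-in:

    #include <stddef.h>
    #include <stdint.h>

    /* Toy illustration of f: hash every d-byte subsegment into a
     * (1u << f)-entry frequency table. Distinct subsegments that hash to
     * the same index are counted together, which is exactly the collision
     * effect the text describes; a larger f reduces collisions but costs
     * more memory. */
    static void countSubsegments(const uint8_t* data, size_t n, size_t d,
                                 unsigned f, uint32_t* freqs /* 1u << f entries */)
    {
        size_t i;
        for (i = 0; i + d <= n; ++i) {
            uint64_t h = 1469598103934665603ULL;  /* FNV-1a offset basis */
            size_t j;
            for (j = 0; j < d; ++j) { h ^= data[i+j]; h *= 1099511628211ULL; }
            freqs[h & ((1u << f) - 1)] += 1;
        }
    }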
-.IP -Larger bucket sizes improve collision resolution but decrease compression speed\. +.P +Benchmarking will employ \fBmax(1, min(4, nbCores/4))\fR worker threads by default in order to match the behavior of the normal CLI I/O\. . -.IP -The minimum \fIlblog\fR is 1 and the maximum is 8 (default: 3)\. +.IP "\(bu" 4 +\fB\-b#\fR: benchmark file(s) using compression level # . -.TP -\fBldmHashRateLog\fR=\fIlhrlog\fR, \fBlhrlog\fR=\fIlhrlog\fR -Specify the frequency of inserting entries into the long distance matching hash table\. +.IP "\(bu" 4 +\fB\-e#\fR: benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive) . -.IP -This option is ignored unless long distance matching is enabled\. +.IP "\(bu" 4 +\fB\-d\fR: benchmark decompression speed only (requires providing a zstd\-compressed content) . -.IP -Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\. +.IP "\(bu" 4 +\fB\-i#\fR: minimum evaluation time, in seconds (default: 3s), benchmark mode only . -.IP -The default value is \fBwlog \- lhlog\fR\. +.IP "\(bu" 4 +\fB\-B#\fR, \fB\-\-block\-size=#\fR: cut file(s) into independent chunks of size # (default: no chunking) . -.SS "Example" -The following parameters sets advanced compression options to something similar to predefined level 19 for files bigger than 256 KB: +.IP "\(bu" 4 +\fB\-S\fR: output one benchmark result per input file (default: consolidated result) +. +.IP "\(bu" 4 +\fB\-D dictionary\fR benchmark using dictionary +. +.IP "\(bu" 4 +\fB\-\-priority=rt\fR: set process priority to real\-time (Windows) +. +.IP "" 0 . .P -\fB\-\-zstd\fR=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 +Beyond compression levels, benchmarking is also compatible with other parameters, such as number of threads (\fB\-T#\fR), advanced compression parameters (\fB\-\-zstd=###\fR), dictionary compression (\fB\-D dictionary\fR), or even disabling checksum verification for example\. . .P \fBOutput Format:\fR CompressionLevel#Filename: InputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed . .P \fBMethodology:\fR For speed measurement, the entire input is compressed/decompressed in\-memory to measure speed\. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\. . .SH "SEE ALSO" \fBzstdgrep\fR(1), \fBzstdless\fR(1), \fBgzip\fR(1), \fBxz\fR(1) . .P The \fIzstandard\fR format is specified in Y\. Collet, "Zstandard Compression and the \'application/zstd\' Media Type", https://www\.ietf\.org/rfc/rfc8878\.txt, Internet RFC 8878 (February 2021)\. . .SH "BUGS" Report bugs at: https://github\.com/facebook/zstd/issues diff --git a/programs/zstd.1.md b/programs/zstd.1.md index 3ab8404ee65..1721441dfbb 100644 --- a/programs/zstd.1.md +++ b/programs/zstd.1.md @@ -4,7 +4,7 @@ zstd(1) -- zstd, zstdmt, unzstd, zstdcat - Compress or decompress .zst files SYNOPSIS -------- -`zstd` [*OPTIONS*] [-|_INPUT-FILE_] [-o _OUTPUT-FILE_] +`zstd` [<OPTIONS>] [-|<INPUT-FILE>] [-o <OUTPUT-FILE>] `zstdmt` is equivalent to `zstd -T0` @@ -16,15 +16,16 @@ SYNOPSIS DESCRIPTION ----------- `zstd` is a fast lossless compression algorithm and data compression tool, -with command line syntax similar to `gzip (1)` and `xz (1)`. +with command line syntax similar to `gzip`(1) and `xz`(1). It is based on the **LZ77** family, with further FSE & huff0 entropy stages.
`zstd` offers highly configurable compression speed, from fast modes at > 200 MB/s per core, to strong modes with excellent compression ratios. -It also features a very fast decoder, with speeds > 500 MB/s per core. +It also features a very fast decoder, with speeds > 500 MB/s per core, +which remains roughly stable at all compression settings. `zstd` command line syntax is generally similar to gzip, -but features the following differences : +but features the following few differences: - Source files are preserved by default. It's possible to remove them automatically by using the `--rm` command. @@ -35,12 +36,13 @@ but features the following differences : Use `-q` to turn it off. - `zstd` does not accept input from console, though it does accept `stdin` when it's not the console. + - `zstd` does not store the input's filename or attributes, only its contents. `zstd` processes each _file_ according to the selected operation mode. If no _files_ are given or _file_ is `-`, `zstd` reads from standard input and writes the processed data to standard output. `zstd` will refuse to write compressed data to standard output -if it is a terminal : it will display an error message and skip the _file_. +if it is a terminal: it will display an error message and skip the file. Similarly, `zstd` will refuse to read compressed data from standard input if it is a terminal. @@ -52,14 +54,15 @@ whose name is derived from the source _file_ name: * When decompressing, the `.zst` suffix is removed from the source filename to get the target filename -### Concatenation with .zst files +### Concatenation with .zst Files It is possible to concatenate multiple `.zst` files. `zstd` will decompress such agglomerated file as if it was a single `.zst` file. OPTIONS ------- -### Integer suffixes and special values +### Integer Suffixes and Special Values + In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers. There must be no space between the integer and the suffix. @@ -71,7 +74,8 @@ There must be no space between the integer and the suffix. Multiply the integer by 1,048,576 (2\^20). `Mi`, `M`, and `MB` are accepted as synonyms for `MiB`. -### Operation mode +### Operation Mode + If multiple operation mode options are given, the last one takes effect. @@ -88,22 +92,32 @@ the last one takes effect. decompressed data is discarded and checksummed for errors. No files are created or removed. * `-b#`: - Benchmark file(s) using compression level # -* `--train FILEs`: - Use FILEs as a training set to create a dictionary. + Benchmark file(s) using compression level _#_. + See _BENCHMARK_ below for a description of this operation. +* `--train FILES`: + Use _FILES_ as a training set to create a dictionary. The training set should contain a lot of small files (> 100). + See _DICTIONARY BUILDER_ below for a description of this operation. * `-l`, `--list`: Display information related to a zstd compressed file, such as size, ratio, and checksum. Some of these fields may not be available. This command's output can be augmented with the `-v` modifier. -### Operation modifiers +### Operation Modifiers * `-#`: - `#` compression level \[1-19] (default: 3) + selects `#` compression level \[1-19\] (default: 3). + Higher compression levels *generally* produce higher compression ratio at the expense of speed and memory. + A rough rule of thumb is that compression speed is expected to be divided by 2 every 2 levels. 
+ Technically, each level is mapped to a set of advanced parameters (that can also be modified individually, see below). + Because the compressor's behavior highly depends on the content to compress, there's no guarantee of a smooth progression from one level to another. * `--ultra`: unlocks high compression levels 20+ (maximum 22), using a lot more memory. - Note that decompression will also require more memory when using these levels. + Decompression will also need more memory when using these levels. +* `--max`: + set advanced parameters to reach maximum compression. + warning: this setting is very slow and uses a lot of resources. + It's inappropriate for 32-bit mode and therefore disabled in this mode. * `--fast[=#]`: switch to ultra-fast compression levels. If `=#` is not present, it defaults to `1`. @@ -112,35 +126,39 @@ the last one takes effect. This setting overwrites compression level if one was set previously. Similarly, if a compression level is set after `--fast`, it overrides it. * `-T#`, `--threads=#`: - Compress using `#` working threads (default: 1). + Compress using `#` working threads (default: between 1 and 4 depending on physical CPU cores; see `ZSTD_NBTHREADS` below). If `#` is 0, attempt to detect and use the number of physical CPU cores. In all cases, the nb of threads is capped to `ZSTDMT_NBWORKERS_MAX`, which is either 64 in 32-bit mode, or 256 for 64-bit environments. This modifier does nothing if `zstd` is compiled without multithread support. + Note that memory usage increases with each thread. * `--single-thread`: Use a single thread for both I/O and compression. As compression is serialized with I/O, this can be slightly slower. Single-thread mode features significantly lower memory usage, which can be useful for systems with limited amount of memory, such as 32-bit systems. - Note 1 : this mode is the only available one when multithread support is disabled. - Note 2 : this mode is different from `-T1`, which spawns 1 compression thread in parallel with I/O. + + Note 1: this mode is the only available one when multithread support is disabled. + + Note 2: this mode is different from `-T1`, which spawns 1 compression thread in parallel with I/O. Final compressed result is also slightly different from `-T1`. * `--auto-threads={physical,logical} (default: physical)`: When using a default amount of threads via `-T0`, choose the default based on the number of detected physical or logical cores. -* `--adapt[=min=#,max=#]` : +* `--adapt[=min=#,max=#]`: `zstd` will dynamically adapt compression level to perceived I/O conditions. Compression level adaptation can be observed live by using command `-v`. Adaptation can be constrained between supplied `min` and `max` levels. The feature works when combined with multi-threading and `--long` mode. It does not work with `--single-thread`. - It sets window size to 8 MB by default (can be changed manually, see `wlog`). + It sets window size to 8 MiB by default (can be changed manually, see `wlog`). Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible. - _note_ : at the time of this writing, `--adapt` can remain stuck at low speed + + _Note_: at the time of this writing, `--adapt` can remain stuck at low speed when combined with multiple worker threads (>=2). * `--long[=#]`: enables long distance matching with `#` `windowLog`, if `#` is not - present it defaults to `27`. + present it defaults to `27`. The highest possible value is 31. 
This increases the window size (`windowLog`) and memory usage for both the compressor and decompressor. This setting is designed to improve the compression ratio for files with @@ -153,22 +171,28 @@ the last one takes effect. * `--patch-from FILE`: Specify the file to be used as a reference point for zstd's diff engine. This is effectively dictionary compression with some convenient parameter - selection, namely that windowSize > srcSize. + selection, namely that _windowSize_ > _srcSize_. + + Note: cannot use both this and `-D` together. - Note: cannot use both this and -D together - Note: `--long` mode will be automatically activated if chainLog < fileLog - (fileLog being the windowLog required to cover the whole file). You + Note: `--long` mode will be automatically activated if _chainLog_ < _fileLog_ + (_fileLog_ being the _windowLog_ required to cover the whole file). You can also manually force it. - Note: for all levels, you can use --patch-from in --single-thread mode - to improve compression ratio at the cost of speed + + Note: up to level 15, you can use `--patch-from` in `--single-thread` mode + to improve compression ratio marginally at the cost of speed. Using + `--single-thread` above level 15 will lead to lower compression + ratios. + Note: for level 19, you can get increased compression ratio at the cost of speed by specifying `--zstd=targetLength=` to be something large - (i.e. 4096), and by setting a large `--zstd=chainLog=` -* `--rsyncable` : + (e.g. 4096), and by setting a large `--zstd=chainLog=`. +* `--rsyncable`: `zstd` will periodically synchronize the compression state to make the - compressed file more rsync-friendly. There is a negligible impact to - compression ratio, and the faster compression levels will see a small - compression speed hit. + compressed file more rsync-friendly. + There is a negligible impact to compression ratio, + and a potential impact to compression speed, perceptible at higher speeds, + for example when combining `--rsyncable` with many parallel worker threads. This feature does not work with `--single-thread`. You probably don't want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary. @@ -177,24 +201,24 @@ the last one takes effect. * `--[no-]content-size`: enable / disable whether or not the original size of the file is placed in the header of the compressed file. The default option is - --content-size (meaning that the original size will be placed in the header). + `--content-size` (meaning that the original size will be placed in the header). * `--no-dictID`: do not store dictionary ID within frame header (dictionary compression). The decoder will have to rely on implicit knowledge about which dictionary to use, it won't be able to check if it's correct. * `-M#`, `--memory=#`: - Set a memory usage limit. By default, Zstandard uses 128 MB for decompression + Set a memory usage limit. By default, `zstd` uses 128 MiB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (i.e. you can increase or decrease it). - This is also used during compression when using with --patch-from=. In this case, - this parameter overrides that maximum size allowed for a dictionary. (128 MB). + This is also used during compression when used with `--patch-from=`. In this case, + this parameter overrides the maximum size allowed for a dictionary (128 MiB).
Additionally, this can be used to limit memory for dictionary training. This parameter - overrides the default limit of 2 GB. zstd will load training samples up to the memory limit + overrides the default limit of 2 GiB. zstd will load training samples up to the memory limit and ignore the rest. -* `--stream-size=#` : +* `--stream-size=#`: Sets the pledged source size of input coming from a stream. This value must be exact, as it will be included in the produced frame header. Incorrect stream sizes will cause an error. This information will be used to better optimize compression parameters, resulting in @@ -206,15 +230,24 @@ the last one takes effect. expected. This feature allows for controlling the guess when needed. Exact guesses result in better compression ratios. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation. -* `-o FILE`: - save result into `FILE` +* `--target-compressed-block-size=#`: + Attempt to produce compressed blocks of approximately this size. + This will split larger blocks in order to approach this target. + This feature is notably useful for improved latency, when the receiver can leverage receiving early incomplete data. + This parameter defines a loose target: compressed blocks will target this size "on average", but individual blocks can still be larger or smaller. + Enabling this feature can decrease compression speed by up to ~10% at level 1. + Higher levels will see smaller relative speed regression, becoming invisible at higher settings. * `-f`, `--force`: disable input and output checks. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc. During decompression and when the output destination is stdout, pass-through unrecognized formats as-is. * `-c`, `--stdout`: - write to standard output (even if it is the console); keep original files unchanged. + write to standard output (even if it is the console); keep original files (disable `--rm`). +* `-o FILE`: + save result into `FILE`. + Note that this operation is in conflict with `-c`. + If both operations are present on the command line, the last expressed one wins. * `--[no-]sparse`: enable / disable sparse FS support, to make files with many zeroes smaller on disk. @@ -227,11 +260,13 @@ the last one takes effect. enable / disable passing through uncompressed files as-is. During decompression when pass-through is enabled, unrecognized formats will be copied as-is from the input to the output. By default, pass-through will - occur when the output destination is stdout and the force (-f) option is + occur when the output destination is stdout and the force (`-f`) option is set. * `--rm`: - remove source file(s) after successful compression or decompression. If used in combination with - -o, will trigger a confirmation prompt (which can be silenced with -f), as this is a destructive operation. + remove source file(s) after successful compression or decompression. + This command is silently ignored if output is `stdout`. + If used in combination with `-o`, + triggers a confirmation prompt (which can be silenced with `-f`), as this is a destructive operation. * `-k`, `--keep`: keep source file(s) after successful compression or decompression. This is the default behavior. @@ -269,10 +304,11 @@ the last one takes effect. * `-h`/`-H`, `--help`: display help/long help and exit * `-V`, `--version`: - display version number and exit. - Advanced : `-vV` also displays supported formats. 
+ display version number and immediately exit. + Note that, since it exits, flags specified after `-V` are effectively ignored. + Advanced: `-vV` also displays supported formats. `-vvV` also displays POSIX support. - `-q` will only display the version number, suitable for machine reading. + `-qV` will only display the version number, suitable for machine reading. * `-v`, `--verbose`: verbose mode, display more information * `-q`, `--quiet`: @@ -281,15 +317,15 @@ the last one takes effect. * `--no-progress`: do not display the progress bar, but keep all other messages. * `--show-default-cparams`: - Shows the default compression parameters that will be used for a - particular src file. If the provided src file is not a regular file - (e.g. named pipe), the cli will just output the default parameters. - That is, the parameters that are used when the src size is unknown. + shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size. + If the provided file is not a regular file (e.g. a pipe), this flag will output the parameters used for inputs of unknown size. +* `--exclude-compressed`: + only compress files that are not already compressed. * `--`: All arguments after `--` are treated as files -### gzip Operation modifiers +### gzip Operation Modifiers When invoked via a `gzip` symlink, `zstd` will support further options that intend to mimic the `gzip` behavior: @@ -300,12 +336,11 @@ options that intend to mimic the `gzip` behavior: alias to the option `-9`. -### Interactions with Environment Variables - +### Environment Variables Employing environment variables to set parameters has security implications. Therefore, this avenue is intentionally limited. Only `ZSTD_CLEVEL` and `ZSTD_NBTHREADS` are currently supported. -They set the compression level and number of threads to use during compression, respectively. +They set the default compression level and number of threads to use during compression, respectively. `ZSTD_CLEVEL` can be used to set the level between 1 and 19 (the "normal" range). If the value of `ZSTD_CLEVEL` is not a valid integer, it will be ignored with a warning message. @@ -313,13 +348,173 @@ If the value of `ZSTD_CLEVEL` is not a valid integer, it will be ignored with a `ZSTD_NBTHREADS` can be used to set the number of threads `zstd` will attempt to use during compression. If the value of `ZSTD_NBTHREADS` is not a valid unsigned integer, it will be ignored with a warning message. -`ZSTD_NBTHREADS` has a default value of (`1`), and is capped at ZSTDMT_NBWORKERS_MAX==200. -`zstd` must be compiled with multithread support for this to have any effect. +`ZSTD_NBTHREADS` has a default value of `max(1, min(4, nbCores/4))`, and is capped at `ZSTDMT_NBWORKERS_MAX` (64 in 32-bit mode, 256 in 64-bit environments). +`zstd` must be compiled with multithread support for this variable to have any effect. They can both be overridden by corresponding command line arguments: `-#` for compression level and `-T#` for number of compression threads. +ADVANCED COMPRESSION OPTIONS +---------------------------- +`zstd` provides 22 predefined regular compression levels plus the fast levels. +A compression level is translated internally into multiple advanced parameters that control the behavior of the compressor +(one can observe the result of this translation with `--show-default-cparams`). +These advanced parameters can be overridden using advanced compression options. + +### --zstd[=options]: +The _options_ are provided as a comma-separated list.
+You may specify only the options you want to change and the rest will be +taken from the selected or default compression level. +The list of available _options_: + +- `strategy`=_strat_, `strat`=_strat_: + Specify the strategy used by the match finder. + + There are 9 strategies numbered from 1 to 9, from fastest to strongest: + 1=`ZSTD_fast`, 2=`ZSTD_dfast`, 3=`ZSTD_greedy`, + 4=`ZSTD_lazy`, 5=`ZSTD_lazy2`, 6=`ZSTD_btlazy2`, + 7=`ZSTD_btopt`, 8=`ZSTD_btultra`, 9=`ZSTD_btultra2`. + +- `windowLog`=_wlog_, `wlog`=_wlog_: + Specify the maximum number of bits for a match distance. + + A higher number of bits increases the chance to find a match, which usually + improves compression ratio. + It also increases memory requirements for the compressor and decompressor. + The minimum _wlog_ is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32-bit + platforms and 31 (2 GiB) on 64-bit platforms. + + Note: If `windowLog` is set to larger than 27, `--long=windowLog` or + `--memory=windowSize` needs to be passed to the decompressor. + +- `hashLog`=_hlog_, `hlog`=_hlog_: + Specify the maximum number of bits for a hash table. + + Bigger hash tables cause fewer collisions, which usually makes compression + faster, but they require more memory during compression. + + The minimum _hlog_ is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB). + +- `chainLog`=_clog_, `clog`=_clog_: + Specify the maximum number of bits for the secondary search structure, + whose form depends on the selected `strategy`. + + Higher numbers of bits increase the chance to find a match, which usually + improves compression ratio. + It also slows down compression speed and increases memory requirements for + compression. + This option is ignored for the `ZSTD_fast` `strategy`, which only has the primary hash table. + + The minimum _clog_ is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32-bit platforms + and 30 (1B entries / 4 GiB) on 64-bit platforms. + +- `searchLog`=_slog_, `slog`=_slog_: + Specify the maximum number of searches in a hash chain or a binary tree + using a logarithmic scale. + + More searches increase the chance to find a match, which usually increases + compression ratio but decreases compression speed. + + The minimum _slog_ is 1 and the maximum is `windowLog` - 1. + +- `minMatch`=_mml_, `mml`=_mml_: + Specify the minimum searched length of a match in a hash table. + + Larger search lengths usually decrease compression ratio but improve + decompression speed. + + The minimum _mml_ is 3 and the maximum is 7. + +- `targetLength`=_tlen_, `tlen`=_tlen_: + The impact of this field varies depending on the selected strategy. + + For `ZSTD_btopt`, `ZSTD_btultra` and `ZSTD_btultra2`, it specifies + the minimum match length that causes the match finder to stop searching. + A larger `targetLength` usually improves compression ratio + but decreases compression speed. + + For `ZSTD_fast`, it triggers ultra-fast mode when > 0. + The value represents the amount of data skipped between match sampling. + Impact is reversed: a larger `targetLength` increases compression speed + but decreases compression ratio. + + For all other strategies, this field has no impact. + + The minimum _tlen_ is 0 and the maximum is 128 KiB. + +- `overlapLog`=_ovlog_, `ovlog`=_ovlog_: + Determine `overlapSize`, the amount of data reloaded from the previous job. + This parameter is only available when multithreading is enabled. + Reloading more data improves compression ratio, but decreases speed. + + The minimum _ovlog_ is 0, and the maximum is 9.
+ 1 means "no overlap", hence completely independent jobs. + 9 means "full overlap", meaning up to `windowSize` is reloaded from the previous job. + Reducing _ovlog_ by 1 reduces the reloaded amount by a factor 2. + For example, 8 means "windowSize/2", and 6 means "windowSize/8". + Value 0 is special and means "default": _ovlog_ is automatically determined by `zstd`; + in that case, _ovlog_ will range from 6 to 9, depending on the selected _strat_. + +- `ldmHashRateLog`=_lhrlog_, `lhrlog`=_lhrlog_: + Specify the frequency of inserting entries into the long distance matching + hash table. + + This option is ignored unless long distance matching is enabled. + + Larger values will improve compression speed. Deviating far from the + default value will likely result in a decrease in compression ratio. + + The default value varies between 4 and 7, depending on `strategy`. + +- `ldmHashLog`=_lhlog_, `lhlog`=_lhlog_: + Specify the maximum size for a hash table used for long distance matching. + + This option is ignored unless long distance matching is enabled. + + Bigger hash tables usually improve compression ratio at the expense of more + memory during compression and a decrease in compression speed. + + The minimum _lhlog_ is 6 and the maximum is 30 (default: `windowLog - ldmHashRateLog`). + +- `ldmMinMatch`=_lmml_, `lmml`=_lmml_: + Specify the minimum searched length of a match for long distance matching. + + This option is ignored unless long distance matching is enabled. + + Larger/very small values usually decrease compression ratio. + + The minimum _lmml_ is 4 and the maximum is 4096 (default: 32 to 64, depending on `strategy`). + +- `ldmBucketSizeLog`=_lblog_, `lblog`=_lblog_: + Specify the size of each bucket for the hash table used for long distance + matching. + + This option is ignored unless long distance matching is enabled. + + Larger bucket sizes improve collision resolution but decrease compression + speed. + + The minimum _lblog_ is 1 and the maximum is 8 (default: 4 to 8, depending on `strategy`). + + +### Example +The following parameters set advanced compression options to something +similar to predefined level 19 for files bigger than 256 KiB: + +`--zstd`=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 + +### --jobsize=#: +Specify the size of each compression job. +This parameter is only meaningful when multi-threading is enabled. +Each compression job is run in parallel, so this value can indirectly impact the nb of active threads. +Default job size varies depending on compression level (generally `4 * windowSize`). +`--jobsize=#` makes it possible to manually select a custom size. +Note that job size must respect a minimum value which is enforced transparently. +This minimum is either 512 KiB or `overlapSize`, whichever is larger. +Different job sizes will lead to non-identical compressed frames. + + DICTIONARY BUILDER ------------------ `zstd` offers _dictionary_ compression, @@ -341,7 +536,7 @@ Compression of small files similar to the sample set will be greatly improved. Since dictionary compression is mostly effective for small files, the expectation is that the training set will only contain small files. In the case where some samples happen to be large, - only the first 128 KB of these samples will be used for training. + only the first 128 KiB of these samples will be used for training. `--train` supports multithreading if `zstd` is compiled with threading support (default). Additional advanced parameters can be specified with `--train-fastcover`.
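A typical session might look like this (sample and dictionary names are placeholders): `zstd --train FILEs -o dict` creates a dictionary file named `dict` from the training set, `zstd -D dict FILE` then compresses with it, and `zstd -D dict -d FILE.zst` decompresses; the same dictionary must be provided for both compression and decompression.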
@@ -360,8 +555,8 @@ Compression of small files similar to the sample set will be greatly improved. Use `#` compression level during training (optional). Will generate statistics more tuned for selected compression level, resulting in a _small_ compression ratio improvement for this level. -* `-B#`: - Split input files into blocks of size # (default: no split) +* `--split=#`: + Split input files into independent chunks of size # (default: no split) * `-M#`, `--memory=#`: Limit the amount of sample data loaded for training (default: 2 GB). Note that the default (2 GB) is also the maximum. @@ -389,11 +584,13 @@ Compression of small files similar to the sample set will be greatly improved. It's possible to provide an explicit number ID instead. It's up to the dictionary manager to not assign twice the same ID to 2 different dictionaries. - Note that short numbers have an advantage : + Note that short numbers have an advantage: an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes. This compares favorably to 4 bytes default. + Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2\^31, so they should not be used in public. + * `--train-cover[=k#,d=#,steps=#,split=#,shrink[=#]]`: Select parameters for the default dictionary builder algorithm named cover. If _d_ is not specified, then it tries _d_ = 6 and _d_ = 8. @@ -470,177 +667,44 @@ Compression of small files similar to the sample set will be greatly improved. BENCHMARK --------- +The `zstd` CLI provides a benchmarking mode that can be used to easily find suitable compression parameters, or alternatively to benchmark a computer's performance. +`zstd -b [FILE(s)]` will benchmark `zstd` for both compression and decompression using default compression level. +Note that results are very dependent on the content being compressed. + +It's possible to pass multiple files to the benchmark, and even a directory with `-r DIRECTORY`. +When no `FILE` is provided, the benchmark will use a procedurally generated `lorem ipsum` text. + +Benchmarking will employ `max(1, min(4, nbCores/4))` worker threads by default in order to match the behavior of the normal CLI I/O. * `-b#`: benchmark file(s) using compression level # * `-e#`: benchmark file(s) using multiple compression levels, from `-b#` to `-e#` (inclusive) +* `-d`: + benchmark decompression speed only (requires zstd-compressed content) * `-i#`: minimum evaluation time, in seconds (default: 3s), benchmark mode only -* `-B#`, `--block-size=#`: - cut file(s) into independent chunks of size # (default: no chunking) +* `--split=#`: + split input file(s) into independent chunks of size # (default: no chunking) +* `-S`: + output one benchmark result per input file (default: consolidated result) +* `-D dictionary`: + benchmark using dictionary * `--priority=rt`: - set process priority to real-time + set process priority to real-time (Windows) -**Output Format:** CompressionLevel#Filename : InputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed +Beyond compression levels, benchmarking is also compatible with other parameters, such as number of threads (`-T#`), advanced compression parameters (`--zstd=###`), dictionary compression (`-D dictionary`), or even disabling checksum verification for example. -**Methodology:** For both compression and decompression speed, the entire input is compressed/decompressed in-memory to measure speed.
A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy. +**Output Format:** CompressionLevel#Filename: InputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed -ADVANCED COMPRESSION OPTIONS ----------------------------- -### -B#: -Specify the size of each compression job. -This parameter is only available when multi-threading is enabled. -Each compression job is run in parallel, so this value indirectly impacts the nb of active threads. -Default job size varies depending on compression level (generally `4 * windowSize`). -`-B#` makes it possible to manually select a custom size. -Note that job size must respect a minimum value which is enforced transparently. -This minimum is either 512 KB, or `overlapSize`, whichever is largest. -Different job sizes will lead to non-identical compressed frames. +**Methodology:** For speed measurement, the entire input is compressed/decompressed in-memory to measure speed. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy. -### --zstd[=options]: -`zstd` provides 22 predefined compression levels. -The selected or default predefined compression level can be changed with -advanced compression options. -The _options_ are provided as a comma-separated list. -You may specify only the options you want to change and the rest will be -taken from the selected or default compression level. -The list of available _options_: - -- `strategy`=_strat_, `strat`=_strat_: - Specify a strategy used by a match finder. - - There are 9 strategies numbered from 1 to 9, from faster to stronger: - 1=ZSTD\_fast, 2=ZSTD\_dfast, 3=ZSTD\_greedy, - 4=ZSTD\_lazy, 5=ZSTD\_lazy2, 6=ZSTD\_btlazy2, - 7=ZSTD\_btopt, 8=ZSTD\_btultra, 9=ZSTD\_btultra2. - -- `windowLog`=_wlog_, `wlog`=_wlog_: - Specify the maximum number of bits for a match distance. - - The higher number of increases the chance to find a match which usually - improves compression ratio. - It also increases memory requirements for the compressor and decompressor. - The minimum _wlog_ is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32-bit - platforms and 31 (2 GiB) on 64-bit platforms. - Note: If `windowLog` is set to larger than 27, `--long=windowLog` or - `--memory=windowSize` needs to be passed to the decompressor. - -- `hashLog`=_hlog_, `hlog`=_hlog_: - Specify the maximum number of bits for a hash table. - - Bigger hash tables cause fewer collisions which usually makes compression - faster, but requires more memory during compression. - - The minimum _hlog_ is 6 (64 B) and the maximum is 30 (1 GiB). - -- `chainLog`=_clog_, `clog`=_clog_: - Specify the maximum number of bits for a hash chain or a binary tree. - - Higher numbers of bits increases the chance to find a match which usually - improves compression ratio. - It also slows down compression speed and increases memory requirements for - compression. - This option is ignored for the ZSTD_fast strategy. - - The minimum _clog_ is 6 (64 B) and the maximum is 29 (524 Mib) on 32-bit platforms - and 30 (1 Gib) on 64-bit platforms. - -- `searchLog`=_slog_, `slog`=_slog_: - Specify the maximum number of searches in a hash chain or a binary tree - using logarithmic scale. - - More searches increases the chance to find a match which usually increases - compression ratio but decreases compression speed. - - The minimum _slog_ is 1 and the maximum is 'windowLog' - 1. 
- -- `minMatch`=_mml_, `mml`=_mml_: - Specify the minimum searched length of a match in a hash table. - - Larger search lengths usually decrease compression ratio but improve - decompression speed. - - The minimum _mml_ is 3 and the maximum is 7. - -- `targetLength`=_tlen_, `tlen`=_tlen_: - The impact of this field vary depending on selected strategy. - - For ZSTD\_btopt, ZSTD\_btultra and ZSTD\_btultra2, it specifies - the minimum match length that causes match finder to stop searching. - A larger `targetLength` usually improves compression ratio - but decreases compression speed. -t - For ZSTD\_fast, it triggers ultra-fast mode when > 0. - The value represents the amount of data skipped between match sampling. - Impact is reversed : a larger `targetLength` increases compression speed - but decreases compression ratio. - - For all other strategies, this field has no impact. - - The minimum _tlen_ is 0 and the maximum is 128 Kib. - -- `overlapLog`=_ovlog_, `ovlog`=_ovlog_: - Determine `overlapSize`, amount of data reloaded from previous job. - This parameter is only available when multithreading is enabled. - Reloading more data improves compression ratio, but decreases speed. - - The minimum _ovlog_ is 0, and the maximum is 9. - 1 means "no overlap", hence completely independent jobs. - 9 means "full overlap", meaning up to `windowSize` is reloaded from previous job. - Reducing _ovlog_ by 1 reduces the reloaded amount by a factor 2. - For example, 8 means "windowSize/2", and 6 means "windowSize/8". - Value 0 is special and means "default" : _ovlog_ is automatically determined by `zstd`. - In which case, _ovlog_ will range from 6 to 9, depending on selected _strat_. - -- `ldmHashLog`=_lhlog_, `lhlog`=_lhlog_: - Specify the maximum size for a hash table used for long distance matching. - - This option is ignored unless long distance matching is enabled. - - Bigger hash tables usually improve compression ratio at the expense of more - memory during compression and a decrease in compression speed. - - The minimum _lhlog_ is 6 and the maximum is 30 (default: 20). - -- `ldmMinMatch`=_lmml_, `lmml`=_lmml_: - Specify the minimum searched length of a match for long distance matching. - - This option is ignored unless long distance matching is enabled. - - Larger/very small values usually decrease compression ratio. - - The minimum _lmml_ is 4 and the maximum is 4096 (default: 64). - -- `ldmBucketSizeLog`=_lblog_, `lblog`=_lblog_: - Specify the size of each bucket for the hash table used for long distance - matching. - - This option is ignored unless long distance matching is enabled. - - Larger bucket sizes improve collision resolution but decrease compression - speed. - - The minimum _lblog_ is 1 and the maximum is 8 (default: 3). - -- `ldmHashRateLog`=_lhrlog_, `lhrlog`=_lhrlog_: - Specify the frequency of inserting entries into the long distance matching - hash table. - - This option is ignored unless long distance matching is enabled. - - Larger values will improve compression speed. Deviating far from the - default value will likely result in a decrease in compression ratio. - - The default value is `wlog - lhlog`. - -### Example -The following parameters sets advanced compression options to something -similar to predefined level 19 for files bigger than 256 KB: - -`--zstd`=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 +SEE ALSO +-------- +`zstdgrep`(1), `zstdless`(1), `gzip`(1), `xz`(1) +The format is specified in Y. 
Collet, "Zstandard Compression and the 'application/zstd' Media Type", https://www.ietf.org/rfc/rfc8878.txt, Internet RFC 8878 (February 2021). BUGS ---- diff --git a/programs/zstdcli.c b/programs/zstdcli.c index 583c8a5919b..31b06d67be5 100644 --- a/programs/zstdcli.c +++ b/programs/zstdcli.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,36 +8,19 @@ * You may select, at your option, one of the above-listed licenses. */ - -/*-************************************ -* Tuning parameters -**************************************/ -#ifndef ZSTDCLI_CLEVEL_DEFAULT -# define ZSTDCLI_CLEVEL_DEFAULT 3 -#endif - -#ifndef ZSTDCLI_CLEVEL_MAX -# define ZSTDCLI_CLEVEL_MAX 19 /* without using --ultra */ -#endif - -#ifndef ZSTDCLI_NBTHREADS_DEFAULT -# define ZSTDCLI_NBTHREADS_DEFAULT 1 -#endif - /*-************************************ * Dependencies **************************************/ -#include "platform.h" /* IS_CONSOLE, PLATFORM_POSIX_VERSION */ -#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList */ +#include "platform.h" /* PLATFORM_POSIX_VERSION */ +#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList, UTIL_isConsole */ #include /* getenv */ #include /* strcmp, strlen */ #include /* fprintf(), stdin, stdout, stderr */ -#include /* errno */ #include /* assert */ #include "fileio.h" /* stdinmark, stdoutmark, ZSTD_EXTENSION */ #ifndef ZSTD_NOBENCH -# include "benchzstd.h" /* BMK_benchFiles */ +# include "benchzstd.h" /* BMK_benchFilesAdvanced */ #endif #ifndef ZSTD_NODICT # include "dibio.h" /* ZDICT_cover_params_t, DiB_trainFromFiles() */ @@ -47,17 +30,34 @@ #endif #include "../lib/zstd.h" /* ZSTD_VERSION_STRING, ZSTD_minCLevel, ZSTD_maxCLevel */ #include "fileio_asyncio.h" +#include "fileio_common.h" +/*-************************************ +* Tuning parameters +**************************************/ +#ifndef ZSTDCLI_CLEVEL_DEFAULT +# define ZSTDCLI_CLEVEL_DEFAULT 3 +#endif + +#ifndef ZSTDCLI_CLEVEL_MAX +# define ZSTDCLI_CLEVEL_MAX 19 /* without using --ultra */ +#endif + +#ifndef ZSTDCLI_NBTHREADS_DEFAULT +#define ZSTDCLI_NBTHREADS_DEFAULT (unsigned)(MAX(1, MIN(4, UTIL_countLogicalCores() / 4))) +#endif + +static unsigned init_nbWorkers(unsigned defaultNbWorkers); /*-************************************ * Constants **************************************/ -#define COMPRESSOR_NAME "zstd command line interface" +#define COMPRESSOR_NAME "Zstandard CLI" #ifndef ZSTD_VERSION # define ZSTD_VERSION "v" ZSTD_VERSION_STRING #endif #define AUTHOR "Yann Collet" -#define WELCOME_MESSAGE "*** %s %i-bits %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR +#define WELCOME_MESSAGE "*** %s (%i-bit) %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR #define ZSTD_ZSTDMT "zstdmt" #define ZSTD_UNZSTD "unzstd" @@ -77,7 +77,9 @@ #define MB *(1 <<20) #define GB *(1U<<30) -#define DISPLAY_LEVEL_DEFAULT 2 +#ifndef ZSTD_DISPLAY_LEVEL_DEFAULT +# define ZSTD_DISPLAY_LEVEL_DEFAULT 2 +#endif static const char* g_defaultDictName = "dictionary"; static const unsigned g_defaultMaxDictSize = 110 KB; @@ -94,17 +96,17 @@ static U32 g_ldmBucketSizeLog = LDM_PARAM_DEFAULT; #define DEFAULT_ACCEL 1 +#define NBWORKERS_AUTOCPU 0 +#define NBWORKERS_UNSET UINT_MAX typedef enum { cover, fastCover, legacy } dictType; /*-************************************ * Display Macros 
**************************************/ -#define DISPLAY_F(f, ...) fprintf((f), __VA_ARGS__) -#define DISPLAYOUT(...) DISPLAY_F(stdout, __VA_ARGS__) -#define DISPLAY(...) DISPLAY_F(stderr, __VA_ARGS__) +#undef DISPLAYLEVEL #define DISPLAYLEVEL(l, ...) { if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } } -static int g_displayLevel = DISPLAY_LEVEL_DEFAULT; /* 0 : no display, 1: errors, 2 : + result + interaction + warnings, 3 : + progression, 4 : + information */ +static int g_displayLevel = ZSTD_DISPLAY_LEVEL_DEFAULT; /* 0 : no display, 1: errors, 2 : + result + interaction + warnings, 3 : + progression, 4 : + information */ /*-************************************ @@ -138,172 +140,189 @@ static int exeNameMatch(const char* exeName, const char* test) * Command Line **************************************/ /* print help either in `stderr` or `stdout` depending on originating request - * error (badusage) => stderr - * help (usage_advanced) => stdout + * error (badUsage) => stderr + * help (usageAdvanced) => stdout */ static void usage(FILE* f, const char* programName) { - DISPLAY_F(f, "Usage: %s [OPTION]... [FILE]... [-o file]\n", programName); - DISPLAY_F(f, "Compress or uncompress FILEs (with no FILE or when FILE is `-`, read from standard input).\n\n"); - DISPLAY_F(f, " -o file result stored into `file` (only 1 output file)\n"); -#ifndef ZSTD_NOCOMPRESS - DISPLAY_F(f, " -1 .. -%d compression level (faster .. better; default: %d)\n", ZSTDCLI_CLEVEL_MAX, ZSTDCLI_CLEVEL_DEFAULT); -#endif -#ifndef ZSTD_NODECOMPRESS - DISPLAY_F(f, " -d, --decompress decompression\n"); -#endif - DISPLAY_F(f, " -f, --force disable input and output checks. Allows overwriting existing files,\n"); - DISPLAY_F(f, " input from console, output to stdout, operating on links,\n"); - DISPLAY_F(f, " block devices, etc. During decompression and when the output\n"); - DISPLAY_F(f, " destination is stdout, pass-through unrecognized formats as-is.\n"); - DISPLAY_F(f, " --rm remove source file(s) after successful de/compression\n"); - DISPLAY_F(f, " -k, --keep preserve source file(s) (default) \n"); + DISPLAY_F(f, "Compress or decompress the INPUT file(s); reads from STDIN if INPUT is `-` or not provided.\n\n"); + DISPLAY_F(f, "Usage: %s [OPTIONS...] [INPUT... | -] [-o OUTPUT]\n\n", programName); + DISPLAY_F(f, "Options:\n"); + DISPLAY_F(f, " -o OUTPUT Write output to a single file, OUTPUT.\n"); + DISPLAY_F(f, " -c, --stdout Write to STDOUT (even if it is a console) and keep the INPUT file(s).\n"); + DISPLAY_F(f, " -k, --keep Preserve INPUT file(s). [Default] \n"); + DISPLAY_F(f, " --rm Remove INPUT file(s) after successful (de)compression to file.\n"); #ifdef ZSTD_GZCOMPRESS if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ - DISPLAY_F(f, " -n, --no-name do not store original filename when compressing\n"); + DISPLAY_F(f, " -n, --no-name Do not store original filename when compressing.\n\n"); } #endif - DISPLAY_F(f, " -D DICT use DICT as Dictionary for compression or decompression\n"); - DISPLAY_F(f, " -h display usage and exit\n"); - DISPLAY_F(f, " -H,--help display long help and exit\n"); + DISPLAY_F(f, "\n"); +#ifndef ZSTD_NOCOMPRESS + DISPLAY_F(f, " -# Desired compression level, where `#` is a number between 1 and %d;\n", ZSTDCLI_CLEVEL_MAX); + DISPLAY_F(f, " lower numbers provide faster compression, higher numbers yield\n"); + DISPLAY_F(f, " better compression ratios. 
[Default: %d]\n\n", ZSTDCLI_CLEVEL_DEFAULT); +#endif +#ifndef ZSTD_NODECOMPRESS + DISPLAY_F(f, " -d, --decompress Perform decompression.\n"); +#endif + DISPLAY_F(f, " -D DICT Use DICT as the dictionary for compression or decompression.\n\n"); + DISPLAY_F(f, " -f, --force Disable input and output checks. Allows overwriting existing files,\n"); + DISPLAY_F(f, " receiving input from the console, printing output to STDOUT, and\n"); + DISPLAY_F(f, " operating on links, block devices, etc. Unrecognized formats will be\n"); + DISPLAY_F(f, " passed through as-is.\n\n"); + + DISPLAY_F(f, " -h Display short usage and exit.\n"); + DISPLAY_F(f, " -H, --help Display full help and exit.\n"); + DISPLAY_F(f, " -V, --version Display the program version and exit.\n"); DISPLAY_F(f, "\n"); } -static void usage_advanced(const char* programName) +static void usageAdvanced(const char* programName) { DISPLAYOUT(WELCOME_MESSAGE); + DISPLAYOUT("\n"); usage(stdout, programName); - DISPLAYOUT("Advanced options :\n"); - DISPLAYOUT(" -V, --version display Version number and exit\n"); + DISPLAYOUT("Advanced options:\n"); - DISPLAYOUT(" -c, --stdout write to standard output (even if it is the console), keep original file\n"); - - DISPLAYOUT(" -v, --verbose verbose mode; specify multiple times to increase verbosity\n"); - DISPLAYOUT(" -q, --quiet suppress warnings; specify twice to suppress errors too\n"); - DISPLAYOUT(" --[no-]progress forcibly display, or never display the progress counter\n"); - DISPLAYOUT(" note: any (de)compressed output to terminal will mix with progress counter text\n"); + DISPLAYOUT(" -v, --verbose Enable verbose output; pass multiple times to increase verbosity.\n"); + DISPLAYOUT(" -q, --quiet Suppress warnings; pass twice to suppress errors.\n"); +#ifndef ZSTD_NOTRACE + DISPLAYOUT(" --trace LOG Log tracing information to LOG.\n"); +#endif + DISPLAYOUT("\n"); + DISPLAYOUT(" --[no-]progress Forcibly show/hide the progress counter. NOTE: Any (de)compressed\n"); + DISPLAYOUT(" output to terminal will mix with progress counter text.\n\n"); #ifdef UTIL_HAS_CREATEFILELIST - DISPLAYOUT(" -r operate recursively on directories\n"); - DISPLAYOUT(" --filelist FILE read list of files to operate upon from FILE\n"); - DISPLAYOUT(" --output-dir-flat DIR : processed files are stored into DIR\n"); + DISPLAYOUT(" -r Operate recursively on directories.\n"); + DISPLAYOUT(" --filelist LIST Read a list of files to operate on from LIST.\n"); + DISPLAYOUT(" --output-dir-flat DIR Store processed files in DIR.\n"); #endif #ifdef UTIL_HAS_MIRRORFILELIST - DISPLAYOUT(" --output-dir-mirror DIR : processed files are stored into DIR respecting original directory structure\n"); + DISPLAYOUT(" --output-dir-mirror DIR Store processed files in DIR, respecting original directory structure.\n"); #endif if (AIO_supported()) - DISPLAYOUT(" --[no-]asyncio use asynchronous IO (default: enabled)\n"); + DISPLAYOUT(" --[no-]asyncio Use asynchronous IO. [Default: Enabled]\n"); + DISPLAYOUT("\n"); #ifndef ZSTD_NOCOMPRESS - DISPLAYOUT(" --[no-]check during compression, add XXH64 integrity checksum to frame (default: enabled)\n"); + DISPLAYOUT(" --[no-]check Add XXH64 integrity checksums during compression.
[Default: Add, Validate]\n"); #ifndef ZSTD_NODECOMPRESS - DISPLAYOUT(" if specified with -d, decompressor will ignore/validate checksums in compressed frame (default: validate)\n"); + DISPLAYOUT(" If `-d` is present, ignore/validate checksums during decompression.\n"); #endif #else #ifdef ZSTD_NOCOMPRESS - DISPLAYOUT(" --[no-]check during decompression, ignore/validate checksums in compressed frame (default: validate)"); + DISPLAYOUT(" --[no-]check Ignore/validate checksums during decompression. [Default: Validate]"); #endif - DISPLAYOUT("\n"); #endif /* ZSTD_NOCOMPRESS */ -#ifndef ZSTD_NOTRACE - DISPLAYOUT(" --trace FILE log tracing information to FILE\n"); -#endif - DISPLAYOUT(" -- all arguments after \"--\" are treated as files\n"); + DISPLAYOUT("\n"); + DISPLAYOUT(" -- Treat remaining arguments after `--` as files.\n"); #ifndef ZSTD_NOCOMPRESS DISPLAYOUT("\n"); - DISPLAYOUT("Advanced compression options :\n"); - DISPLAYOUT(" --ultra enable levels beyond %i, up to %i (requires more memory)\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel()); - DISPLAYOUT(" --long[=#] enable long distance matching with given window log (default: %u)\n", g_defaultMaxWindowLog); - DISPLAYOUT(" --fast[=#] switch to very fast compression levels (default: %u)\n", 1); + DISPLAYOUT("Advanced compression options:\n"); + DISPLAYOUT(" --ultra Enable levels beyond %i, up to %i; requires more memory.\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel()); + DISPLAYOUT(" --fast[=#] Switch to very fast compression levels. [Default: %u]\n", 1); #ifdef ZSTD_GZCOMPRESS if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ - DISPLAYOUT(" --best compatibility alias for -9 \n"); - DISPLAYOUT(" --no-name do not store original filename when compressing\n"); + DISPLAYOUT(" --best Compatibility alias for `-9`.\n"); } #endif - DISPLAYOUT(" --adapt dynamically adapt compression level to I/O conditions\n"); - DISPLAYOUT(" --[no-]row-match-finder : force enable/disable usage of fast row-based matchfinder for greedy, lazy, and lazy2 strategies\n"); - DISPLAYOUT(" --patch-from=FILE : specify the file to be used as a reference point for zstd's diff engine. \n"); + DISPLAYOUT(" --adapt Dynamically adapt compression level to I/O conditions.\n"); + DISPLAYOUT(" --long[=#] Enable long distance matching with window log #. [Default: %u]\n", g_defaultMaxWindowLog); + DISPLAYOUT(" --patch-from=REF Use REF as the reference point for Zstandard's diff engine. \n"); + DISPLAYOUT(" --patch-apply Equivalent to `-d --patch-from` \n\n"); # ifdef ZSTD_MULTITHREAD - DISPLAYOUT(" -T# spawn # compression threads (default: 1, 0==# cores) \n"); - DISPLAYOUT(" -B# select size of each job (default: 0==automatic) \n"); - DISPLAYOUT(" --single-thread use a single thread for both I/O and compression (result slightly different than -T1) \n"); - DISPLAYOUT(" --auto-threads={physical,logical} : use either physical cores or logical cores as default when specifying -T0 (default: physical)\n"); - DISPLAYOUT(" --rsyncable compress using a rsync-friendly method (-B sets block size) \n"); + DISPLAYOUT(" -T# Spawn # compression threads. [Default: %u; pass 0 for core count.]\n", init_nbWorkers(ZSTDCLI_NBTHREADS_DEFAULT)); + DISPLAYOUT(" --single-thread Share a single thread for I/O and compression (slightly different than `-T1`).\n"); + DISPLAYOUT(" --auto-threads={physical|logical}\n"); + DISPLAYOUT(" Use physical/logical cores when using `-T0`. [Default: Physical]\n\n"); + DISPLAYOUT(" --jobsize=# Set job size to #.
[Default: 0 (automatic)]\n"); + DISPLAYOUT(" --rsyncable Compress using a rsync-friendly method (`--jobsize=#` sets unit size). \n"); + DISPLAYOUT("\n"); # endif - DISPLAYOUT(" --exclude-compressed : only compress files that are not already compressed \n"); - DISPLAYOUT(" --stream-size=# specify size of streaming input from `stdin` \n"); - DISPLAYOUT(" --size-hint=# optimize compression parameters for streaming input of approximately this size \n"); - DISPLAYOUT(" --target-compressed-block-size=# : generate compressed block of approximately targeted size \n"); - DISPLAYOUT(" --no-dictID don't write dictID into header (dictionary compression only)\n"); - DISPLAYOUT(" --[no-]compress-literals : force (un)compressed literals\n"); - - DISPLAYOUT(" --format=zstd compress files to the .zst format (default)\n"); + DISPLAYOUT(" --exclude-compressed Only compress files that are not already compressed.\n\n"); + + DISPLAYOUT(" --stream-size=# Specify size of streaming input from STDIN.\n"); + DISPLAYOUT(" --size-hint=# Optimize compression parameters for streaming input of approximately size #.\n"); + DISPLAYOUT(" --target-compressed-block-size=#\n"); + DISPLAYOUT(" Generate compressed blocks of approximately # size.\n\n"); + DISPLAYOUT(" --no-dictID Don't write `dictID` into the header (dictionary compression only).\n"); + DISPLAYOUT(" --[no-]compress-literals Force (un)compressed literals.\n"); + DISPLAYOUT(" --[no-]row-match-finder Explicitly enable/disable the fast, row-based matchfinder for\n"); + DISPLAYOUT(" the 'greedy', 'lazy', and 'lazy2' strategies.\n"); + + DISPLAYOUT("\n"); + DISPLAYOUT(" --format=zstd Compress files to the `.zst` format. [Default]\n"); + DISPLAYOUT(" --[no-]mmap-dict Memory-map dictionary file rather than mallocing and loading all at once\n"); #ifdef ZSTD_GZCOMPRESS - DISPLAYOUT(" --format=gzip compress files to the .gz format\n"); + DISPLAYOUT(" --format=gzip Compress files to the `.gz` format.\n"); #endif #ifdef ZSTD_LZMACOMPRESS - DISPLAYOUT(" --format=xz compress files to the .xz format\n"); - DISPLAYOUT(" --format=lzma compress files to the .lzma format\n"); + DISPLAYOUT(" --format=xz Compress files to the `.xz` format.\n"); + DISPLAYOUT(" --format=lzma Compress files to the `.lzma` format.\n"); #endif #ifdef ZSTD_LZ4COMPRESS - DISPLAYOUT( " --format=lz4 compress files to the .lz4 format\n"); + DISPLAYOUT( " --format=lz4 Compress files to the `.lz4` format.\n"); #endif #endif /* !ZSTD_NOCOMPRESS */ #ifndef ZSTD_NODECOMPRESS DISPLAYOUT("\n"); - DISPLAYOUT("Advanced decompression options :\n"); - DISPLAYOUT(" -l print information about zstd compressed files\n"); - DISPLAYOUT(" --test test compressed file integrity\n"); - DISPLAYOUT(" -M# Set a memory usage limit for decompression\n"); + DISPLAYOUT("Advanced decompression options:\n"); + DISPLAYOUT(" -l Print information about Zstandard-compressed files.\n"); + DISPLAYOUT(" --test Test compressed file integrity.\n"); + DISPLAYOUT(" -M# Set the memory usage limit to # megabytes.\n"); # if ZSTD_SPARSE_DEFAULT - DISPLAYOUT(" --[no-]sparse sparse mode (default: enabled on file, disabled on stdout)\n"); + DISPLAYOUT(" --[no-]sparse Enable sparse mode. [Default: Enabled for files, disabled for STDOUT.]\n"); # else - DISPLAYOUT(" --[no-]sparse sparse mode (default: disabled)\n"); + DISPLAYOUT(" --[no-]sparse Enable sparse mode. 
[Default: Disabled]\n"); # endif { - char const* passThroughDefault = "disabled"; + char const* passThroughDefault = "Disabled"; if (exeNameMatch(programName, ZSTD_CAT) || exeNameMatch(programName, ZSTD_ZCAT) || exeNameMatch(programName, ZSTD_GZCAT)) { - passThroughDefault = "enabled"; + passThroughDefault = "Enabled"; } - DISPLAYOUT(" --[no-]pass-through : passes through uncompressed files as-is (default: %s)\n", passThroughDefault); + DISPLAYOUT(" --[no-]pass-through Pass through uncompressed files as-is. [Default: %s]\n", passThroughDefault); } #endif /* ZSTD_NODECOMPRESS */ #ifndef ZSTD_NODICT DISPLAYOUT("\n"); - DISPLAYOUT("Dictionary builder :\n"); - DISPLAYOUT(" --train ## create a dictionary from a training set of files\n"); - DISPLAYOUT(" --train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]] : use the cover algorithm with optional args\n"); - DISPLAYOUT(" --train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]] : use the fast cover algorithm with optional args\n"); - DISPLAYOUT(" --train-legacy[=s=#] : use the legacy algorithm with selectivity (default: %u)\n", g_defaultSelectivityLevel); - DISPLAYOUT(" -o DICT DICT is dictionary name (default: %s)\n", g_defaultDictName); - DISPLAYOUT(" --maxdict=# limit dictionary to specified size (default: %u)\n", g_defaultMaxDictSize); - DISPLAYOUT(" --dictID=# force dictionary ID to specified value (default: random)\n"); + DISPLAYOUT("Dictionary builder:\n"); + DISPLAYOUT(" --train Create a dictionary from a training set of files.\n\n"); + DISPLAYOUT(" --train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]]\n"); + DISPLAYOUT(" Use the cover algorithm (with optional arguments).\n"); + DISPLAYOUT(" --train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]]\n"); + DISPLAYOUT(" Use the fast cover algorithm (with optional arguments).\n\n"); + DISPLAYOUT(" --train-legacy[=s=#] Use the legacy algorithm with selectivity #. [Default: %u]\n", g_defaultSelectivityLevel); + DISPLAYOUT(" -o NAME Use NAME as dictionary name. [Default: %s]\n", g_defaultDictName); + DISPLAYOUT(" --maxdict=# Limit dictionary to specified size #. [Default: %u]\n", g_defaultMaxDictSize); + DISPLAYOUT(" --dictID=# Force dictionary ID to #. [Default: Random]\n"); #endif #ifndef ZSTD_NOBENCH DISPLAYOUT("\n"); - DISPLAYOUT("Benchmark options : \n"); - DISPLAYOUT(" -b# benchmark file(s), using # compression level (default: %d)\n", ZSTDCLI_CLEVEL_DEFAULT); - DISPLAYOUT(" -e# test all compression levels successively from -b# to -e# (default: 1)\n"); - DISPLAYOUT(" -i# minimum evaluation time in seconds (default: 3s)\n"); - DISPLAYOUT(" -B# cut file into independent chunks of size # (default: no chunking)\n"); - DISPLAYOUT(" -S output one benchmark result per input file (default: consolidated result)\n"); - DISPLAYOUT(" --priority=rt set process priority to real-time\n"); + DISPLAYOUT("Benchmark options:\n"); + DISPLAYOUT(" -b# Perform benchmarking with compression level #. [Default: %d]\n", ZSTDCLI_CLEVEL_DEFAULT); + DISPLAYOUT(" -e# Test all compression levels up to #; starting level is `-b#`. [Default: 1]\n"); + DISPLAYOUT(" -i# Set the minimum evaluation time to # seconds. [Default: 3]\n"); + DISPLAYOUT(" --split=# Split input into independent chunks of size #. [Default: No chunking]\n"); + DISPLAYOUT(" -S Output one benchmark result per input file.
[Default: Consolidated result]\n"); + DISPLAYOUT(" -D dictionary Benchmark using dictionary \n"); + DISPLAYOUT(" --priority=rt Set process priority to real-time.\n"); #endif } -static void badusage(const char* programName) +static void badUsage(const char* programName, const char* parameter) { - DISPLAYLEVEL(1, "Incorrect parameters \n"); + DISPLAYLEVEL(1, "Incorrect parameter: %s \n", parameter); if (g_displayLevel >= 2) usage(stderr, programName); } @@ -325,12 +344,12 @@ static const char* lastNameFromPath(const char* path) static void errorOut(const char* msg) { - DISPLAY("%s \n", msg); exit(1); + DISPLAYLEVEL(1, "%s \n", msg); exit(1); } /*! readU32FromCharChecked() : * @return 0 if success, and store the result in *value. - * allows and interprets K, KB, KiB, M, MB and MiB suffix. + * allows and interprets K, KB, KiB, M, MB, MiB, G, GB and GiB suffix. * Will also modify `*stringPtr`, advancing it to position where it stopped reading. * @return 1 if an overflow error occurs */ static int readU32FromCharChecked(const char** stringPtr, unsigned* value) @@ -345,15 +364,22 @@ static int readU32FromCharChecked(const char** stringPtr, unsigned* value) if (result < last) return 1; /* overflow error */ (*stringPtr)++ ; } - if ((**stringPtr=='K') || (**stringPtr=='M')) { - unsigned const maxK = ((unsigned)(-1)) >> 10; - if (result > maxK) return 1; /* overflow error */ - result <<= 10; - if (**stringPtr=='M') { - if (result > maxK) return 1; /* overflow error */ - result <<= 10; + if ((**stringPtr=='K') || (**stringPtr=='M') || (**stringPtr=='G')) { + switch (**stringPtr) { + case 'K': + if (result > (((unsigned)-1) >> 10)) return 1; /* overflow error */ + result <<= 10; + break; + case 'M': + if (result > (((unsigned)-1) >> 20)) return 1; /* overflow error */ + result <<= 20; + break; + case 'G': + if (result > (((unsigned)-1) >> 30)) return 1; /* overflow error */ + result <<= 30; + break; } - (*stringPtr)++; /* skip `K` or `M` */ + (*stringPtr)++; /* skip `K`, `M` or `G` */ if (**stringPtr=='i') (*stringPtr)++; if (**stringPtr=='B') (*stringPtr)++; } @@ -363,7 +389,7 @@ static int readU32FromCharChecked(const char** stringPtr, unsigned* value) /*! readU32FromChar() : * @return : unsigned integer value read from input in `char` format. - * allows and interprets K, KB, KiB, M, MB and MiB suffix. + * allows and interprets K, KB, KiB, M, MB, MiB, G, GB and GiB suffix. * Will also modify `*stringPtr`, advancing it to position where it stopped reading. * Note : function will exit() program if digit sequence overflows */ static unsigned readU32FromChar(const char** stringPtr) { @@ -375,7 +401,7 @@ static unsigned readU32FromChar(const char** stringPtr) { /*! readIntFromChar() : * @return : signed integer value read from input in `char` format. - * allows and interprets K, KB, KiB, M, MB and MiB suffix. + * allows and interprets K, KB, KiB, M, MB, MiB, G, GB and GiB suffix. * Will also modify `*stringPtr`, advancing it to position where it stopped reading. * Note : function will exit() program if digit sequence overflows */ static int readIntFromChar(const char** stringPtr) { @@ -392,7 +418,7 @@ static int readIntFromChar(const char** stringPtr) { /*! readSizeTFromCharChecked() : * @return 0 if success, and store the result in *value. - * allows and interprets K, KB, KiB, M, MB and MiB suffix. + * allows and interprets K, KB, KiB, M, MB, MiB, G, GB and GiB suffix. * Will also modify `*stringPtr`, advancing it to position where it stopped reading. 
 * @return 1 if an overflow error occurs */
 static int readSizeTFromCharChecked(const char** stringPtr, size_t* value)
@@ -407,15 +433,22 @@ static int readSizeTFromCharChecked(const char** stringPtr, size_t* value)
         if (result < last) return 1; /* overflow error */
         (*stringPtr)++ ;
     }
-    if ((**stringPtr=='K') || (**stringPtr=='M')) {
-        size_t const maxK = ((size_t)(-1)) >> 10;
-        if (result > maxK) return 1; /* overflow error */
-        result <<= 10;
-        if (**stringPtr=='M') {
-            if (result > maxK) return 1; /* overflow error */
-            result <<= 10;
+    if ((**stringPtr=='K') || (**stringPtr=='M') || (**stringPtr=='G')) {
+        switch (**stringPtr) {
+        case 'K':
+            if (result > (((size_t)-1) >> 10)) return 1; /* overflow error */
+            result <<= 10;
+            break;
+        case 'M':
+            if (result > (((size_t)-1) >> 20)) return 1; /* overflow error */
+            result <<= 20;
+            break;
+        case 'G':
+            if (result > (((size_t)-1) >> 30)) return 1; /* overflow error */
+            result <<= 30;
+            break;
         }
-        (*stringPtr)++;  /* skip `K` or `M` */
+        (*stringPtr)++;  /* skip `K`, `M` or `G` */
         if (**stringPtr=='i') (*stringPtr)++;
         if (**stringPtr=='B') (*stringPtr)++;
     }
@@ -425,7 +458,7 @@ static int readSizeTFromCharChecked(const char** stringPtr, size_t* value)
 
 /*! readSizeTFromChar() :
 * @return : size_t value read from input in `char` format.
- *  allows and interprets K, KB, KiB, M, MB and MiB suffix.
+ *  allows and interprets K, KB, KiB, M, MB, MiB, G, GB and GiB suffix.
  *  Will also modify `*stringPtr`, advancing it to position where it stopped reading.
 *  Note : function will exit() program if digit sequence overflows */
 static size_t readSizeTFromChar(const char** stringPtr) {
@@ -574,7 +607,7 @@ static ZDICT_fastCover_params_t defaultFastCoverParams(void)
 
 /** parseAdaptParameters() :
- *  reads adapt parameters from *stringPtr (e.g. "--zstd=min=1,max=19) and store them into adaptMinPtr and adaptMaxPtr.
+ *  reads adapt parameters from *stringPtr (e.g. "--adapt=min=1,max=19) and store them into adaptMinPtr and adaptMaxPtr.
 *  Both adaptMinPtr and adaptMaxPtr must be already allocated and correctly initialized.
 *  There is no guarantee that any of these values will be updated.
 * @return 1 means that parsing was successful,
@@ -621,15 +654,29 @@ static unsigned parseCompressionParameters(const char* stringPtr, ZSTD_compressi
         return 0;
     }
 
-    DISPLAYLEVEL(4, "windowLog=%d, chainLog=%d, hashLog=%d, searchLog=%d \n", params->windowLog, params->chainLog, params->hashLog, params->searchLog);
-    DISPLAYLEVEL(4, "minMatch=%d, targetLength=%d, strategy=%d \n", params->minMatch, params->targetLength, params->strategy);
     if (stringPtr[0] != 0) return 0; /* check the end of string */
     return 1;
 }
 
+static void setMaxCompression(ZSTD_compressionParameters* params)
+{
+    params->windowLog = ZSTD_WINDOWLOG_MAX;
+    params->chainLog = ZSTD_CHAINLOG_MAX;
+    params->hashLog = ZSTD_HASHLOG_MAX;
+    params->searchLog = ZSTD_SEARCHLOG_MAX;
+    params->minMatch = ZSTD_MINMATCH_MIN;
+    params->targetLength = ZSTD_TARGETLENGTH_MAX;
+    params->strategy = ZSTD_STRATEGY_MAX;
+    g_overlapLog = ZSTD_OVERLAPLOG_MAX;
+    g_ldmHashLog = ZSTD_LDM_HASHLOG_MAX;
+    g_ldmHashRateLog = 0;   /* automatically derived */
+    g_ldmMinMatch = 16;     /* heuristic */
+    g_ldmBucketSizeLog = ZSTD_LDM_BUCKETSIZELOG_MAX;
+}
+
 static void printVersion(void)
 {
-    if (g_displayLevel < DISPLAY_LEVEL_DEFAULT) {
+    if (g_displayLevel < ZSTD_DISPLAY_LEVEL_DEFAULT) {
         DISPLAYOUT("%s\n", ZSTD_VERSION_STRING);
         return;
     }
@@ -657,6 +704,12 @@ static void printVersion(void)
         DISPLAYOUT("lz4 version %s\n", FIO_lz4Version());
         DISPLAYOUT("lzma version %s\n", FIO_lzmaVersion());
 
+    #ifdef ZSTD_MULTITHREAD
+        DISPLAYOUT("supports Multithreading \n");
+    #else
+        DISPLAYOUT("single-thread operations only \n");
+    #endif
+
         /* posix support */
     #ifdef _POSIX_C_SOURCE
         DISPLAYOUT("_POSIX_C_SOURCE defined: %ldL\n", (long) _POSIX_C_SOURCE);
@@ -667,7 +720,10 @@ static void printVersion(void)
     #ifdef PLATFORM_POSIX_VERSION
        DISPLAYOUT("PLATFORM_POSIX_VERSION defined: %ldL\n", (long) PLATFORM_POSIX_VERSION);
     #endif
-    }   }
+
+    if (!ZSTD_isDeterministicBuild()) {
+        DISPLAYOUT("non-deterministic build\n");
+    }   }   }
 }
 
 #define ZSTD_NB_STRATEGIES 9
@@ -681,7 +737,7 @@ static void printDefaultCParams(const char* filename, const char* dictFileName,
     unsigned long long fileSize = UTIL_getFileSize(filename);
     const size_t dictSize = dictFileName != NULL ? (size_t)UTIL_getFileSize(dictFileName) : 0;
     const ZSTD_compressionParameters cParams = ZSTD_getCParams(cLevel, fileSize, dictSize);
-    if (fileSize != UTIL_FILESIZE_UNKNOWN) DISPLAY("%s (%u bytes)\n", filename, (unsigned)fileSize);
+    if (fileSize != UTIL_FILESIZE_UNKNOWN) DISPLAY("%s (%llu bytes)\n", filename, fileSize);
     else DISPLAY("%s (src size unknown)\n", filename);
     DISPLAY(" - windowLog     : %u\n", cParams.windowLog);
     DISPLAY(" - chainLog      : %u\n", cParams.chainLog);
@@ -714,7 +770,7 @@ static void printActualCParams(const char* filename, const char* dictFileName, i
 
 /* Environment variables for parameter setting */
 #define ENV_CLEVEL "ZSTD_CLEVEL"
-#define ENV_NBTHREADS "ZSTD_NBTHREADS"    /* takes lower precedence than directly specifying -T# in the CLI */
+#define ENV_NBWORKERS "ZSTD_NBTHREADS"    /* takes lower precedence than directly specifying -T# in the CLI */
 
 /* pick up environment variable */
 static int init_cLevel(void) {
@@ -744,26 +800,29 @@ static int init_cLevel(void) {
     return ZSTDCLI_CLEVEL_DEFAULT;
 }
 
+static unsigned init_nbWorkers(unsigned defaultNbWorkers) {
 #ifdef ZSTD_MULTITHREAD
-static unsigned init_nbThreads(void) {
-    const char* const env = getenv(ENV_NBTHREADS);
+    const char* const env = getenv(ENV_NBWORKERS);
     if (env != NULL) {
         const char* ptr = env;
         if ((*ptr>='0') && (*ptr<='9')) {
             unsigned nbThreads;
             if (readU32FromCharChecked(&ptr, &nbThreads)) {
-                DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: numeric value too large \n", ENV_NBTHREADS, env);
-                return ZSTDCLI_NBTHREADS_DEFAULT;
+                DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: numeric value too large \n", ENV_NBWORKERS, env);
+                return defaultNbWorkers;
             } else if (*ptr == 0) {
                 return nbThreads;
            }
        }
-        DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: not a valid unsigned value \n", ENV_NBTHREADS, env);
+        DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: not a valid unsigned value \n", ENV_NBWORKERS, env);
     }
-    return ZSTDCLI_NBTHREADS_DEFAULT;
-}
+    return defaultNbWorkers;
+#else
+    (void)defaultNbWorkers;
+    return 1;
 #endif
+}
@@ -772,32 +831,32 @@ static unsigned init_nbThreads(void) {
 
 #define NEXT_FIELD(ptr) {                         \
     if (*argument == '=') {                       \
         ptr = ++argument;                         \
         argument += strlen(argument);             \
     } else {                                      \
         argNb++;                                  \
         if (argNb >= argCount) {                  \
-            DISPLAY("error: missing command argument \n");                                  \
+            DISPLAYLEVEL(1, "error: missing command argument \n");                          \
             CLEAN_RETURN(1);                      \
         }                                         \
         ptr = argv[argNb];                        \
         assert(ptr != NULL);                      \
         if (ptr[0]=='-') {                        \
-            DISPLAY("error: command cannot be separated from its argument by another command \n"); \
+            DISPLAYLEVEL(1, "error: command cannot be separated from its argument by another command \n"); \
             CLEAN_RETURN(1);                      \
     }   }   }
 
-#define NEXT_UINT32(val32) {                      \
-    const char* __nb;                             \
-    NEXT_FIELD(__nb);                             \
-    val32 = readU32FromChar(&__nb);               \
-    if(*__nb != 0) {                              \
-        errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \
-    }                                             \
+#define NEXT_UINT32(_varu32) {                    \
+    const char* __nb;                             \
+    NEXT_FIELD(__nb);                             \
+    _varu32 = readU32FromChar(&__nb);             \
+    if(*__nb != 0) {                              \
+        errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB, G, GB, GiB are allowed"); \
+    }                                             \
 }
 
-#define NEXT_TSIZE(valTsize) {                    \
-    const char* __nb;                             \
-    NEXT_FIELD(__nb);                             \
-    valTsize = readSizeTFromChar(&__nb);          \
-    if(*__nb != 0) {                              \
-        errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \
-    }                                             \
+#define NEXT_TSIZE(_varTsize) {                   \
+    const char* __nb;                             \
+    NEXT_FIELD(__nb);                             \
+    _varTsize = readSizeTFromChar(&__nb);         \
+    if(*__nb != 0) {                              \
+        errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB, G, GB, GiB are allowed"); \
errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB, G, GB, GiB are allowed"); \ + } \ } typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train, zom_list } zstd_operation_mode; @@ -824,7 +883,6 @@ int main(int argCount, const char* argv[]) ldmFlag = 0, main_pause = 0, adapt = 0, - useRowMatchFinder = 0, adaptMin = MINCLEVEL, adaptMax = MAXCLEVEL, rsyncable = 0, @@ -835,19 +893,25 @@ int main(int argCount, const char* argv[]) singleThread = 0, defaultLogicalCores = 0, showDefaultCParams = 0, - ultra=0, - contentSize=1; - unsigned nbWorkers = 0; - double compressibility = 0.5; + contentSize = 1, + removeSrcFile = 0, + cLevel = init_cLevel(), + ultra = 0, + cLevelLast = MINCLEVEL - 1, /* for benchmark range */ + setThreads_non1 = 0; + unsigned nbWorkers = init_nbWorkers(NBWORKERS_UNSET); + ZSTD_ParamSwitch_e mmapDict = ZSTD_ps_auto; + ZSTD_ParamSwitch_e useRowMatchFinder = ZSTD_ps_auto; + FIO_compressionType_t cType = FIO_zstdCompression; + double compressibility = -1.0; /* lorem ipsum generator */ unsigned bench_nbSeconds = 3; /* would be better if this value was synchronized from bench */ - size_t blockSize = 0; + size_t chunkSize = 0; FIO_prefs_t* const prefs = FIO_createPreferences(); FIO_ctx_t* const fCtx = FIO_createContext(); + FIO_progressSetting_e progress = FIO_ps_auto; zstd_operation_mode operation = zom_compress; ZSTD_compressionParameters compressionParams; - int cLevel = init_cLevel(); - int cLevelLast = MINCLEVEL - 1; /* lower than minimum */ unsigned recursive = 0; unsigned memLimit = 0; FileNamesTable* filenames = UTIL_allocateFileNamesTable((size_t)argCount); /* argCount >= 1 */ @@ -875,37 +939,33 @@ int main(int argCount, const char* argv[]) #ifndef ZSTD_NOBENCH BMK_advancedParams_t benchParams = BMK_initAdvancedParams(); #endif - ZSTD_paramSwitch_e literalCompressionMode = ZSTD_ps_auto; - + ZSTD_ParamSwitch_e literalCompressionMode = ZSTD_ps_auto; /* init */ checkLibVersion(); (void)recursive; (void)cLevelLast; /* not used when ZSTD_NOBENCH set */ (void)memLimit; assert(argCount >= 1); - if ((filenames==NULL) || (file_of_names==NULL)) { DISPLAY("zstd: allocation error \n"); exit(1); } + if ((filenames==NULL) || (file_of_names==NULL)) { DISPLAYLEVEL(1, "zstd: allocation error \n"); exit(1); } programName = lastNameFromPath(programName); -#ifdef ZSTD_MULTITHREAD - nbWorkers = init_nbThreads(); -#endif /* preset behaviors */ - if (exeNameMatch(programName, ZSTD_ZSTDMT)) nbWorkers=0, singleThread=0; + if (exeNameMatch(programName, ZSTD_ZSTDMT)) nbWorkers=NBWORKERS_AUTOCPU, singleThread=0; if (exeNameMatch(programName, ZSTD_UNZSTD)) operation=zom_decompress; if (exeNameMatch(programName, ZSTD_CAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* supports multiple formats */ if (exeNameMatch(programName, ZSTD_ZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* behave like zcat, also supports multiple formats */ if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ - suffix = GZ_EXTENSION; FIO_setCompressionType(prefs, FIO_gzipCompression); FIO_setRemoveSrcFile(prefs, 1); + suffix = GZ_EXTENSION; cType = FIO_gzipCompression; removeSrcFile=1; dictCLevel = cLevel = 6; /* gzip default is -6 */ } - if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; 
+    if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; removeSrcFile=1; }    /* behave like gunzip, also supports multiple formats */
     if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; }    /* behave like gzcat, also supports multiple formats */
-    if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; FIO_setCompressionType(prefs, FIO_lzmaCompression); FIO_setRemoveSrcFile(prefs, 1); }    /* behave like lzma */
-    if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_lzmaCompression); FIO_setRemoveSrcFile(prefs, 1); }    /* behave like unlzma, also supports multiple formats */
-    if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; FIO_setCompressionType(prefs, FIO_xzCompression); FIO_setRemoveSrcFile(prefs, 1); }    /* behave like xz */
-    if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_xzCompression); FIO_setRemoveSrcFile(prefs, 1); }    /* behave like unxz, also supports multiple formats */
-    if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; FIO_setCompressionType(prefs, FIO_lz4Compression); }    /* behave like lz4 */
-    if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_lz4Compression); }    /* behave like unlz4, also supports multiple formats */
+    if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; cType = FIO_lzmaCompression; removeSrcFile=1; }    /* behave like lzma */
+    if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; cType = FIO_lzmaCompression; removeSrcFile=1; }    /* behave like unlzma, also supports multiple formats */
+    if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; cType = FIO_xzCompression; removeSrcFile=1; }    /* behave like xz */
+    if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; cType = FIO_xzCompression; removeSrcFile=1; }    /* behave like unxz, also supports multiple formats */
+    if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; cType = FIO_lz4Compression; }    /* behave like lz4 */
+    if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; cType = FIO_lz4Compression; }    /* behave like unlz4, also supports multiple formats */
     memset(&compressionParams, 0, sizeof(compressionParams));
 
     /* init crash handler */
@@ -914,6 +974,7 @@ int main(int argCount, const char* argv[])
 
     /* command switches */
     for (argNb=1; argNb